Test Report: KVM_Linux 21966

f7c9a93757611cb83a7bfb680dda9add42d627cb:2025-11-23:42464

Test fail (10/366)

TestStartStop/group/no-preload/serial/Pause (40.5s)

=== RUN   TestStartStop/group/no-preload/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 pause -p no-preload-019660 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 pause -p no-preload-019660 --alsologtostderr -v=1: (1.571604429s)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
E1123 08:57:40.923908   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:43.933855   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.921635   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.928126   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.939747   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:45.961269   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.002876   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.084366   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.245911   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:46.567695   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:47.209951   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:48.491430   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660: exit status 2 (15.763002847s)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: post-pause apiserver status = "Stopped"; want = "Paused"
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-019660 -n no-preload-019660
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-019660 -n no-preload-019660: exit status 2 (15.764590369s)

-- stdout --
	Stopped

-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 unpause -p no-preload-019660 --alsologtostderr -v=1
E1123 08:58:06.417197   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Done: out/minikube-linux-amd64 unpause -p no-preload-019660 --alsologtostderr -v=1: (1.01628798s)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======>  post-mortem[TestStartStop/group/no-preload/serial/Pause]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:252: <<< TestStartStop/group/no-preload/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======>  post-mortem[TestStartStop/group/no-preload/serial/Pause]: minikube logs <======
helpers_test.go:255: (dbg) Run:  out/minikube-linux-amd64 -p no-preload-019660 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p no-preload-019660 logs -n 25: (1.823722377s)
helpers_test.go:260: TestStartStop/group/no-preload/serial/Pause logs: 
-- stdout --
	
	==> Audit <==
	┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│ COMMAND │                                                                                                        ARGS                                                                                                        │           PROFILE            │  USER   │ VERSION │     START TIME      │      END TIME       │
	├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ start   │ -p old-k8s-version-896471 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=kvm2  --kubernetes-version=v1.28.0 │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ addons  │ enable metrics-server -p no-preload-019660 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain                                                                            │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ stop    │ -p no-preload-019660 --alsologtostderr -v=3                                                                                                                                                                        │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ addons  │ enable dashboard -p no-preload-019660 --images=MetricsScraper=registry.k8s.io/echoserver:1.4                                                                                                                       │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ start   │ -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.34.1                                                                                       │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
	│ addons  │ enable metrics-server -p embed-certs-059363 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain                                                                           │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ stop    │ -p embed-certs-059363 --alsologtostderr -v=3                                                                                                                                                                       │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ addons  │ enable dashboard -p embed-certs-059363 --images=MetricsScraper=registry.k8s.io/echoserver:1.4                                                                                                                      │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ start   │ -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2  --kubernetes-version=v1.34.1                                                                                        │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
	│ addons  │ enable metrics-server -p default-k8s-diff-port-925051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain                                                                 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ stop    │ -p default-k8s-diff-port-925051 --alsologtostderr -v=3                                                                                                                                                             │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ image   │ old-k8s-version-896471 image list --format=json                                                                                                                                                                    │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ pause   │ -p old-k8s-version-896471 --alsologtostderr -v=1                                                                                                                                                                   │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ unpause │ -p old-k8s-version-896471 --alsologtostderr -v=1                                                                                                                                                                   │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ delete  │ -p old-k8s-version-896471                                                                                                                                                                                          │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ delete  │ -p old-k8s-version-896471                                                                                                                                                                                          │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ start   │ -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2  --kubernetes-version=v1.34.1 │ newest-cni-078196            │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │                     │
	│ addons  │ enable dashboard -p default-k8s-diff-port-925051 --images=MetricsScraper=registry.k8s.io/echoserver:1.4                                                                                                            │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ start   │ -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2  --kubernetes-version=v1.34.1                                                                      │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │                     │
	│ image   │ no-preload-019660 image list --format=json                                                                                                                                                                         │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ pause   │ -p no-preload-019660 --alsologtostderr -v=1                                                                                                                                                                        │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ image   │ embed-certs-059363 image list --format=json                                                                                                                                                                        │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ pause   │ -p embed-certs-059363 --alsologtostderr -v=1                                                                                                                                                                       │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ unpause │ -p no-preload-019660 --alsologtostderr -v=1                                                                                                                                                                        │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ unpause │ -p embed-certs-059363 --alsologtostderr -v=1                                                                                                                                                                       │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/11/23 08:57:16
	Running on machine: ubuntu-20-agent-3
	Binary: Built with gc go1.25.3 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1123 08:57:16.853497   62480 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:57:16.853743   62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:57:16.853753   62480 out.go:374] Setting ErrFile to fd 2...
	I1123 08:57:16.853757   62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:57:16.854434   62480 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:57:16.855203   62480 out.go:368] Setting JSON to false
	I1123 08:57:16.856605   62480 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":5986,"bootTime":1763882251,"procs":197,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I1123 08:57:16.856696   62480 start.go:143] virtualization: kvm guest
	I1123 08:57:16.935723   62480 out.go:179] * [default-k8s-diff-port-925051] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	I1123 08:57:16.941914   62480 out.go:179]   - MINIKUBE_LOCATION=21966
	I1123 08:57:16.941916   62480 notify.go:221] Checking for updates...
	I1123 08:57:16.943817   62480 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1123 08:57:16.945573   62480 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:57:16.946745   62480 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:57:16.947938   62480 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I1123 08:57:16.949027   62480 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I1123 08:57:16.950511   62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:16.951037   62480 driver.go:422] Setting default libvirt URI to qemu:///system
	I1123 08:57:16.994324   62480 out.go:179] * Using the kvm2 driver based on existing profile
	I1123 08:57:16.995670   62480 start.go:309] selected driver: kvm2
	I1123 08:57:16.995691   62480 start.go:927] validating driver "kvm2" against &{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:16.995851   62480 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1123 08:57:16.997354   62480 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1123 08:57:16.997396   62480 cni.go:84] Creating CNI manager for ""
	I1123 08:57:16.997466   62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:16.997521   62480 start.go:353] cluster config:
	{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:16.997662   62480 iso.go:125] acquiring lock: {Name:mk9cdb644d601a15f26caa6d527f7a63e06eb691 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1123 08:57:16.999287   62480 out.go:179] * Starting "default-k8s-diff-port-925051" primary control-plane node in "default-k8s-diff-port-925051" cluster
	I1123 08:57:16.538965   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:16.543216   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.543908   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.543934   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.544164   62034 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/config.json ...
	I1123 08:57:16.544418   62034 machine.go:94] provisionDockerMachine start ...
	I1123 08:57:16.547123   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.547583   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.547608   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.547766   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:16.547963   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:16.547972   62034 main.go:143] libmachine: About to run SSH command:
	hostname
	I1123 08:57:16.673771   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
	
	I1123 08:57:16.673806   62034 buildroot.go:166] provisioning hostname "embed-certs-059363"
	I1123 08:57:16.677167   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.677679   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.677711   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.677931   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:16.678192   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:16.678214   62034 main.go:143] libmachine: About to run SSH command:
	sudo hostname embed-certs-059363 && echo "embed-certs-059363" | sudo tee /etc/hostname
	I1123 08:57:16.832499   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-059363
	
	I1123 08:57:16.837251   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.837813   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.837855   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.838109   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:16.838438   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:16.838465   62034 main.go:143] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sembed-certs-059363' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-059363/g' /etc/hosts;
				else 
					echo '127.0.1.1 embed-certs-059363' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1123 08:57:16.972318   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:16.972350   62034 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
	I1123 08:57:16.972374   62034 buildroot.go:174] setting up certificates
	I1123 08:57:16.972395   62034 provision.go:84] configureAuth start
	I1123 08:57:16.976994   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.977623   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.977662   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.980665   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.981134   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.981158   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.981351   62034 provision.go:143] copyHostCerts
	I1123 08:57:16.981431   62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
	I1123 08:57:16.981446   62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
	I1123 08:57:16.981523   62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
	I1123 08:57:16.981635   62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
	I1123 08:57:16.981646   62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
	I1123 08:57:16.981690   62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
	I1123 08:57:16.981769   62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
	I1123 08:57:16.981779   62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
	I1123 08:57:16.981817   62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
	I1123 08:57:16.981897   62034 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.embed-certs-059363 san=[127.0.0.1 192.168.72.170 embed-certs-059363 localhost minikube]
	I1123 08:57:17.112794   62034 provision.go:177] copyRemoteCerts
	I1123 08:57:17.112848   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1123 08:57:17.115853   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.116282   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.116308   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.116478   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:17.223809   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1123 08:57:17.266771   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I1123 08:57:17.305976   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
	I1123 08:57:17.336820   62034 provision.go:87] duration metric: took 364.408049ms to configureAuth
	I1123 08:57:17.336863   62034 buildroot.go:189] setting minikube options for container-runtime
	I1123 08:57:17.337080   62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:17.339671   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.340090   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.340112   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.340318   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:17.340623   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:17.340643   62034 main.go:143] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I1123 08:57:17.463677   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
	
	I1123 08:57:17.463707   62034 buildroot.go:70] root file system type: tmpfs
	I1123 08:57:17.463928   62034 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I1123 08:57:17.467227   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.467655   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.467686   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.467940   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:17.468174   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:17.468268   62034 main.go:143] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I1123 08:57:17.602870   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I1123 08:57:17.606541   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.607111   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.607152   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.607427   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:17.607698   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:17.607716   62034 main.go:143] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I1123 08:57:19.186051   62386 start.go:364] duration metric: took 9.989286317s to acquireMachinesLock for "newest-cni-078196"
	I1123 08:57:19.186120   62386 start.go:93] Provisioning new machine with config: &{Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
	I1123 08:57:19.186215   62386 start.go:125] createHost starting for "" (driver="kvm2")
	W1123 08:57:15.950255   61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
	W1123 08:57:17.951890   61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
	I1123 08:57:19.962419   61684 pod_ready.go:94] pod "coredns-66bc5c9577-nj6pk" is "Ready"
	I1123 08:57:19.962449   61684 pod_ready.go:86] duration metric: took 8.021055049s for pod "coredns-66bc5c9577-nj6pk" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.967799   61684 pod_ready.go:83] waiting for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.977812   61684 pod_ready.go:94] pod "etcd-no-preload-019660" is "Ready"
	I1123 08:57:19.977834   61684 pod_ready.go:86] duration metric: took 10.013782ms for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.982683   61684 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.992798   61684 pod_ready.go:94] pod "kube-apiserver-no-preload-019660" is "Ready"
	I1123 08:57:19.992831   61684 pod_ready.go:86] duration metric: took 10.122708ms for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.997939   61684 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.145706   61684 pod_ready.go:94] pod "kube-controller-manager-no-preload-019660" is "Ready"
	I1123 08:57:20.145742   61684 pod_ready.go:86] duration metric: took 147.777309ms for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.355205   61684 pod_ready.go:83] waiting for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.756189   61684 pod_ready.go:94] pod "kube-proxy-wlb9w" is "Ready"
	I1123 08:57:20.756259   61684 pod_ready.go:86] duration metric: took 400.985169ms for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.947647   61684 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:21.350509   61684 pod_ready.go:94] pod "kube-scheduler-no-preload-019660" is "Ready"
	I1123 08:57:21.350539   61684 pod_ready.go:86] duration metric: took 402.864201ms for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:21.350552   61684 pod_ready.go:40] duration metric: took 9.416731421s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I1123 08:57:21.405369   61684 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
	I1123 08:57:21.409795   61684 out.go:179] * Done! kubectl is now configured to use "no-preload-019660" cluster and "default" namespace by default
	I1123 08:57:17.000521   62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:57:17.000560   62480 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
	I1123 08:57:17.000571   62480 cache.go:65] Caching tarball of preloaded images
	I1123 08:57:17.000667   62480 preload.go:238] Found /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
	I1123 08:57:17.000683   62480 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on docker
	I1123 08:57:17.000806   62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
	I1123 08:57:17.001089   62480 start.go:360] acquireMachinesLock for default-k8s-diff-port-925051: {Name:mka7dedac533b164a995f5c19cff4f68d827bd22 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
	I1123 08:57:18.895461   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
	Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
	
	I1123 08:57:18.895495   62034 machine.go:97] duration metric: took 2.351059819s to provisionDockerMachine
	I1123 08:57:18.895519   62034 start.go:293] postStartSetup for "embed-certs-059363" (driver="kvm2")
	I1123 08:57:18.895547   62034 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1123 08:57:18.895631   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1123 08:57:18.899037   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:18.899549   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:18.899585   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:18.899747   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:18.995822   62034 ssh_runner.go:195] Run: cat /etc/os-release
	I1123 08:57:19.001215   62034 info.go:137] Remote host: Buildroot 2025.02
	I1123 08:57:19.001261   62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
	I1123 08:57:19.001335   62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
	I1123 08:57:19.001434   62034 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
	I1123 08:57:19.001551   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1123 08:57:19.015155   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:19.054248   62034 start.go:296] duration metric: took 158.692501ms for postStartSetup
	I1123 08:57:19.054294   62034 fix.go:56] duration metric: took 20.246777293s for fixHost
	I1123 08:57:19.058146   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.058727   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.058771   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.058998   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:19.059317   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:19.059336   62034 main.go:143] libmachine: About to run SSH command:
	date +%s.%N
	I1123 08:57:19.185896   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888239.115597688
	
	I1123 08:57:19.185919   62034 fix.go:216] guest clock: 1763888239.115597688
	I1123 08:57:19.185926   62034 fix.go:229] Guest: 2025-11-23 08:57:19.115597688 +0000 UTC Remote: 2025-11-23 08:57:19.054315183 +0000 UTC m=+20.376918396 (delta=61.282505ms)
	I1123 08:57:19.185941   62034 fix.go:200] guest clock delta is within tolerance: 61.282505ms
	I1123 08:57:19.185962   62034 start.go:83] releasing machines lock for "embed-certs-059363", held for 20.37844631s
	I1123 08:57:19.189984   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.190596   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.190635   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.191288   62034 ssh_runner.go:195] Run: cat /version.json
	I1123 08:57:19.191295   62034 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1123 08:57:19.195221   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.195642   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.195676   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.195699   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.195883   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:19.196195   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.196264   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.196563   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:19.315903   62034 ssh_runner.go:195] Run: systemctl --version
	I1123 08:57:19.323178   62034 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	W1123 08:57:19.333159   62034 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
	I1123 08:57:19.333365   62034 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1123 08:57:19.356324   62034 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I1123 08:57:19.356355   62034 start.go:496] detecting cgroup driver to use...
	I1123 08:57:19.356469   62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:19.385750   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I1123 08:57:19.400434   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1123 08:57:19.414104   62034 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I1123 08:57:19.414182   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1123 08:57:19.433788   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:19.449538   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1123 08:57:19.464107   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:19.481469   62034 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1123 08:57:19.496533   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1123 08:57:19.511385   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I1123 08:57:19.525634   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I1123 08:57:19.544298   62034 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1123 08:57:19.560120   62034 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
	stdout:
	
	stderr:
	sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
	I1123 08:57:19.560179   62034 ssh_runner.go:195] Run: sudo modprobe br_netfilter
	I1123 08:57:19.576631   62034 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1123 08:57:19.592833   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:19.763221   62034 ssh_runner.go:195] Run: sudo systemctl restart containerd
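The block above rewrites /etc/containerd/config.toml (pause image, cgroupfs SystemdCgroup, runc v2, CNI conf_dir) and then reloads and restarts the unit. A minimal sketch, assuming the stock config layout, of how those edits could be spot-checked on the guest:

    # confirm the sed rewrites performed above took effect
    grep -n 'SystemdCgroup' /etc/containerd/config.toml    # expect: SystemdCgroup = false
    grep -n 'sandbox_image' /etc/containerd/config.toml    # expect: registry.k8s.io/pause:3.10.1
    grep -n 'conf_dir' /etc/containerd/config.toml         # expect: conf_dir = "/etc/cni/net.d"
    sudo systemctl is-active containerd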
	I1123 08:57:19.811223   62034 start.go:496] detecting cgroup driver to use...
	I1123 08:57:19.811335   62034 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I1123 08:57:19.833532   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:19.859627   62034 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I1123 08:57:19.884432   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:19.903805   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:19.921275   62034 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1123 08:57:19.960990   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:19.980317   62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:20.008661   62034 ssh_runner.go:195] Run: which cri-dockerd
	I1123 08:57:20.013631   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I1123 08:57:20.029302   62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I1123 08:57:20.057103   62034 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I1123 08:57:20.252891   62034 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I1123 08:57:20.490326   62034 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I1123 08:57:20.490458   62034 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I1123 08:57:20.526773   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:57:20.548985   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:20.740694   62034 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:57:21.481342   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1123 08:57:21.507341   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I1123 08:57:21.530703   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:21.555618   62034 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I1123 08:57:21.736442   62034 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I1123 08:57:21.910308   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:22.084793   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I1123 08:57:22.133988   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:22.150466   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:22.310923   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:22.333687   62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
	I1123 08:57:22.355809   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:22.373321   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:22.392686   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:22.568456   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:22.588895   62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
	I1123 08:57:22.604152   62034 retry.go:31] will retry after 1.30731135s: cri-docker.service not running
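The retry above means cri-docker.service did not report active after the restart. A sketch of the equivalent manual diagnosis, using the same units the log manipulates:

    sudo systemctl status cri-docker.service --no-pager
    sudo systemctl status cri-docker.socket --no-pager
    sudo journalctl -u cri-docker.service --no-pager | tail -n 20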
	I1123 08:57:19.188404   62386 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
	I1123 08:57:19.188687   62386 start.go:159] libmachine.API.Create for "newest-cni-078196" (driver="kvm2")
	I1123 08:57:19.188735   62386 client.go:173] LocalClient.Create starting
	I1123 08:57:19.188852   62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem
	I1123 08:57:19.188919   62386 main.go:143] libmachine: Decoding PEM data...
	I1123 08:57:19.188950   62386 main.go:143] libmachine: Parsing certificate...
	I1123 08:57:19.189026   62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem
	I1123 08:57:19.189059   62386 main.go:143] libmachine: Decoding PEM data...
	I1123 08:57:19.189080   62386 main.go:143] libmachine: Parsing certificate...
	I1123 08:57:19.189577   62386 main.go:143] libmachine: creating domain...
	I1123 08:57:19.189595   62386 main.go:143] libmachine: creating network...
	I1123 08:57:19.191331   62386 main.go:143] libmachine: found existing default network
	I1123 08:57:19.191879   62386 main.go:143] libmachine: <network connections='3'>
	  <name>default</name>
	  <uuid>c61344c2-dba2-46dd-a21a-34776d235985</uuid>
	  <forward mode='nat'>
	    <nat>
	      <port start='1024' end='65535'/>
	    </nat>
	  </forward>
	  <bridge name='virbr0' stp='on' delay='0'/>
	  <mac address='52:54:00:10:a2:1d'/>
	  <ip address='192.168.122.1' netmask='255.255.255.0'>
	    <dhcp>
	      <range start='192.168.122.2' end='192.168.122.254'/>
	    </dhcp>
	  </ip>
	</network>
	
	I1123 08:57:19.193313   62386 network.go:206] using free private subnet 192.168.39.0/24: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e04740}
	I1123 08:57:19.193434   62386 main.go:143] libmachine: defining private network:
	
	<network>
	  <name>mk-newest-cni-078196</name>
	  <dns enable='no'/>
	  <ip address='192.168.39.1' netmask='255.255.255.0'>
	    <dhcp>
	      <range start='192.168.39.2' end='192.168.39.253'/>
	    </dhcp>
	  </ip>
	</network>
	
	I1123 08:57:19.200866   62386 main.go:143] libmachine: creating private network mk-newest-cni-078196 192.168.39.0/24...
	I1123 08:57:19.291873   62386 main.go:143] libmachine: private network mk-newest-cni-078196 192.168.39.0/24 created
	I1123 08:57:19.292226   62386 main.go:143] libmachine: <network>
	  <name>mk-newest-cni-078196</name>
	  <uuid>d7bc9eb0-778c-4b77-a392-72f78dc9558b</uuid>
	  <bridge name='virbr1' stp='on' delay='0'/>
	  <mac address='52:54:00:20:cc:6a'/>
	  <dns enable='no'/>
	  <ip address='192.168.39.1' netmask='255.255.255.0'>
	    <dhcp>
	      <range start='192.168.39.2' end='192.168.39.253'/>
	    </dhcp>
	  </ip>
	</network>
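The XML above is the private network libvirt reports back after creation. A sketch of inspecting or recreating it by hand with virsh, assuming the qemu:///system URI this profile uses:

    virsh --connect qemu:///system net-list --all
    virsh --connect qemu:///system net-dumpxml mk-newest-cni-078196
    # an equivalent network could be defined from a saved copy of the XML:
    #   virsh net-define mk-newest-cni-078196.xml && virsh net-start mk-newest-cni-078196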
	
	I1123 08:57:19.292287   62386 main.go:143] libmachine: setting up store path in /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
	I1123 08:57:19.292318   62386 main.go:143] libmachine: building disk image from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso
	I1123 08:57:19.292332   62386 common.go:152] Making disk image using store path: /home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:57:19.292416   62386 main.go:143] libmachine: Downloading /home/jenkins/minikube-integration/21966-18241/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso...
	I1123 08:57:19.540811   62386 common.go:159] Creating ssh key: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa...
	I1123 08:57:19.628322   62386 common.go:165] Creating raw disk image: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk...
	I1123 08:57:19.628370   62386 main.go:143] libmachine: Writing magic tar header
	I1123 08:57:19.628409   62386 main.go:143] libmachine: Writing SSH key tar header
	I1123 08:57:19.628532   62386 common.go:179] Fixing permissions on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
	I1123 08:57:19.628646   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196
	I1123 08:57:19.628680   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 (perms=drwx------)
	I1123 08:57:19.628696   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines
	I1123 08:57:19.628716   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines (perms=drwxr-xr-x)
	I1123 08:57:19.628737   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:57:19.628753   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube (perms=drwxr-xr-x)
	I1123 08:57:19.628766   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241
	I1123 08:57:19.628783   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241 (perms=drwxrwxr-x)
	I1123 08:57:19.628796   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration
	I1123 08:57:19.628812   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration (perms=drwxrwxr-x)
	I1123 08:57:19.628825   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins
	I1123 08:57:19.628845   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
	I1123 08:57:19.628862   62386 main.go:143] libmachine: checking permissions on dir: /home
	I1123 08:57:19.628874   62386 main.go:143] libmachine: skipping /home - not owner
	I1123 08:57:19.628886   62386 main.go:143] libmachine: defining domain...
	I1123 08:57:19.630619   62386 main.go:143] libmachine: defining domain using XML: 
	<domain type='kvm'>
	  <name>newest-cni-078196</name>
	  <memory unit='MiB'>3072</memory>
	  <vcpu>2</vcpu>
	  <features>
	    <acpi/>
	    <apic/>
	    <pae/>
	  </features>
	  <cpu mode='host-passthrough'>
	  </cpu>
	  <os>
	    <type>hvm</type>
	    <boot dev='cdrom'/>
	    <boot dev='hd'/>
	    <bootmenu enable='no'/>
	  </os>
	  <devices>
	    <disk type='file' device='cdrom'>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
	      <target dev='hdc' bus='scsi'/>
	      <readonly/>
	    </disk>
	    <disk type='file' device='disk'>
	      <driver name='qemu' type='raw' cache='default' io='threads' />
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
	      <target dev='hda' bus='virtio'/>
	    </disk>
	    <interface type='network'>
	      <source network='mk-newest-cni-078196'/>
	      <model type='virtio'/>
	    </interface>
	    <interface type='network'>
	      <source network='default'/>
	      <model type='virtio'/>
	    </interface>
	    <serial type='pty'>
	      <target port='0'/>
	    </serial>
	    <console type='pty'>
	      <target type='serial' port='0'/>
	    </console>
	    <rng model='virtio'>
	      <backend model='random'>/dev/random</backend>
	    </rng>
	  </devices>
	</domain>
	
	I1123 08:57:19.637651   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:7a:a4:6b in network default
	I1123 08:57:19.638554   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:19.638580   62386 main.go:143] libmachine: starting domain...
	I1123 08:57:19.638587   62386 main.go:143] libmachine: ensuring networks are active...
	I1123 08:57:19.639501   62386 main.go:143] libmachine: Ensuring network default is active
	I1123 08:57:19.640013   62386 main.go:143] libmachine: Ensuring network mk-newest-cni-078196 is active
	I1123 08:57:19.640748   62386 main.go:143] libmachine: getting domain XML...
	I1123 08:57:19.642270   62386 main.go:143] libmachine: starting domain XML:
	<domain type='kvm'>
	  <name>newest-cni-078196</name>
	  <uuid>67bf4217-d2fd-4841-a93c-e1581f4c5592</uuid>
	  <memory unit='KiB'>3145728</memory>
	  <currentMemory unit='KiB'>3145728</currentMemory>
	  <vcpu placement='static'>2</vcpu>
	  <os>
	    <type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
	    <boot dev='cdrom'/>
	    <boot dev='hd'/>
	    <bootmenu enable='no'/>
	  </os>
	  <features>
	    <acpi/>
	    <apic/>
	    <pae/>
	  </features>
	  <cpu mode='host-passthrough' check='none' migratable='on'/>
	  <clock offset='utc'/>
	  <on_poweroff>destroy</on_poweroff>
	  <on_reboot>restart</on_reboot>
	  <on_crash>destroy</on_crash>
	  <devices>
	    <emulator>/usr/bin/qemu-system-x86_64</emulator>
	    <disk type='file' device='cdrom'>
	      <driver name='qemu' type='raw'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
	      <target dev='hdc' bus='scsi'/>
	      <readonly/>
	      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
	    </disk>
	    <disk type='file' device='disk'>
	      <driver name='qemu' type='raw' io='threads'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
	      <target dev='hda' bus='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
	    </disk>
	    <controller type='usb' index='0' model='piix3-uhci'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
	    </controller>
	    <controller type='pci' index='0' model='pci-root'/>
	    <controller type='scsi' index='0' model='lsilogic'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
	    </controller>
	    <interface type='network'>
	      <mac address='52:54:00:d7:c1:0d'/>
	      <source network='mk-newest-cni-078196'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
	    </interface>
	    <interface type='network'>
	      <mac address='52:54:00:7a:a4:6b'/>
	      <source network='default'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
	    </interface>
	    <serial type='pty'>
	      <target type='isa-serial' port='0'>
	        <model name='isa-serial'/>
	      </target>
	    </serial>
	    <console type='pty'>
	      <target type='serial' port='0'/>
	    </console>
	    <input type='mouse' bus='ps2'/>
	    <input type='keyboard' bus='ps2'/>
	    <audio id='1' type='none'/>
	    <memballoon model='virtio'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
	    </memballoon>
	    <rng model='virtio'>
	      <backend model='random'>/dev/random</backend>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
	    </rng>
	  </devices>
	</domain>
	
	I1123 08:57:21.239037   62386 main.go:143] libmachine: waiting for domain to start...
	I1123 08:57:21.240876   62386 main.go:143] libmachine: domain is now running
	I1123 08:57:21.240900   62386 main.go:143] libmachine: waiting for IP...
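The retries that follow poll libvirt for a DHCP lease and then fall back to the ARP table. A sketch of the equivalent manual lookup, assuming the same qemu:///system URI:

    virsh --connect qemu:///system domifaddr newest-cni-078196 --source lease
    virsh --connect qemu:///system domifaddr newest-cni-078196 --source arp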
	I1123 08:57:21.241736   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:21.242592   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:21.242611   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:21.243307   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:21.243346   62386 retry.go:31] will retry after 218.272628ms: waiting for domain to come up
	I1123 08:57:21.462945   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:21.463818   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:21.463835   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:21.464322   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:21.464353   62386 retry.go:31] will retry after 354.758102ms: waiting for domain to come up
	I1123 08:57:21.820932   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:21.821871   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:21.821891   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:21.822290   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:21.822322   62386 retry.go:31] will retry after 480.079581ms: waiting for domain to come up
	I1123 08:57:22.304134   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:22.305030   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:22.305053   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:22.305471   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:22.305501   62386 retry.go:31] will retry after 430.762091ms: waiting for domain to come up
	I1123 08:57:22.738137   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:22.739007   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:22.739022   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:22.739466   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:22.739499   62386 retry.go:31] will retry after 752.582052ms: waiting for domain to come up
	I1123 08:57:23.493414   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:23.494256   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:23.494271   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:23.494669   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:23.494696   62386 retry.go:31] will retry after 765.228537ms: waiting for domain to come up
	I1123 08:57:23.912604   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:23.930659   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:23.946465   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:24.099133   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:24.217974   62034 retry.go:31] will retry after 1.350292483s: cri-docker.service not running
	I1123 08:57:25.569520   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:25.588082   62034 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I1123 08:57:25.588166   62034 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I1123 08:57:25.595521   62034 start.go:564] Will wait 60s for crictl version
	I1123 08:57:25.595597   62034 ssh_runner.go:195] Run: which crictl
	I1123 08:57:25.600903   62034 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1123 08:57:25.642159   62034 start.go:580] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.5.1
	RuntimeApiVersion:  v1
	I1123 08:57:25.642260   62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:25.678324   62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:25.708968   62034 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
	I1123 08:57:25.712357   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:25.712811   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:25.712861   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:25.713088   62034 ssh_runner.go:195] Run: grep 192.168.72.1	host.minikube.internal$ /etc/hosts
	I1123 08:57:25.718506   62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.72.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:57:25.737282   62034 kubeadm.go:884] updating cluster {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1
.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: Multi
NodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I1123 08:57:25.737446   62034 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:57:25.737523   62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:25.759347   62034 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:57:25.759372   62034 docker.go:621] Images already preloaded, skipping extraction
	I1123 08:57:25.759440   62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:25.784761   62034 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:57:25.784786   62034 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:57:25.784796   62034 kubeadm.go:935] updating node { 192.168.72.170 8443 v1.34.1 docker true true} ...
	I1123 08:57:25.784906   62034 kubeadm.go:947] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-059363 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.72.170
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I1123 08:57:25.784959   62034 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I1123 08:57:25.840443   62034 cni.go:84] Creating CNI manager for ""
	I1123 08:57:25.840484   62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:25.840500   62034 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
	I1123 08:57:25.840520   62034 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.72.170 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-059363 NodeName:embed-certs-059363 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.72.170"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.72.170 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPo
dPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1123 08:57:25.840651   62034 kubeadm.go:196] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.72.170
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "embed-certs-059363"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.72.170"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.72.170"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.1
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1123 08:57:25.840731   62034 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
	I1123 08:57:25.855481   62034 binaries.go:51] Found k8s binaries, skipping transfer
	I1123 08:57:25.855562   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1123 08:57:25.869149   62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (319 bytes)
	I1123 08:57:25.890030   62034 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1123 08:57:25.913602   62034 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
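The 2225-byte file just copied is the kubeadm config rendered above. A sketch of checking it before the phased init that follows, assuming the v1.34.1 kubeadm binary under /var/lib/minikube/binaries supports the `config validate` subcommand (present in recent kubeadm releases):

    sudo /var/lib/minikube/binaries/v1.34.1/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml.new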
	I1123 08:57:25.939399   62034 ssh_runner.go:195] Run: grep 192.168.72.170	control-plane.minikube.internal$ /etc/hosts
	I1123 08:57:25.944187   62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.72.170	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:57:25.959980   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:26.112182   62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:57:26.150488   62034 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363 for IP: 192.168.72.170
	I1123 08:57:26.150514   62034 certs.go:195] generating shared ca certs ...
	I1123 08:57:26.150535   62034 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:26.150704   62034 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
	I1123 08:57:26.150759   62034 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
	I1123 08:57:26.150773   62034 certs.go:257] generating profile certs ...
	I1123 08:57:26.150910   62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/client.key
	I1123 08:57:26.151011   62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key.4b3bdd21
	I1123 08:57:26.151069   62034 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key
	I1123 08:57:26.151216   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
	W1123 08:57:26.151290   62034 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
	I1123 08:57:26.151305   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
	I1123 08:57:26.151344   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
	I1123 08:57:26.151380   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
	I1123 08:57:26.151415   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
	I1123 08:57:26.151483   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:26.152356   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1123 08:57:26.201568   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I1123 08:57:26.246367   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1123 08:57:26.299610   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1123 08:57:26.334177   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
	I1123 08:57:26.372484   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I1123 08:57:26.408684   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1123 08:57:26.449833   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
	I1123 08:57:26.493006   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
	I1123 08:57:26.527341   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1123 08:57:26.564892   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
	I1123 08:57:26.601408   62034 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1123 08:57:26.626296   62034 ssh_runner.go:195] Run: openssl version
	I1123 08:57:26.634385   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
	I1123 08:57:26.650265   62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
	I1123 08:57:26.657578   62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
	I1123 08:57:26.657632   62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
	I1123 08:57:26.666331   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
	I1123 08:57:26.682746   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1123 08:57:26.697978   62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:26.704544   62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:26.704612   62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:26.714575   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1123 08:57:26.730139   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
	I1123 08:57:26.745401   62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
	I1123 08:57:26.751383   62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
	I1123 08:57:26.751450   62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
	I1123 08:57:26.760273   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
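The 3ec20f2e.0, b5213941.0 and 51391683.0 links created above are OpenSSL subject-hash names, derived from the `openssl x509 -hash` calls earlier in this block. A sketch of reproducing one by hand:

    openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941, per the symlink above
    ls -l /etc/ssl/certs/b5213941.0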
	I1123 08:57:26.775477   62034 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I1123 08:57:26.782298   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I1123 08:57:26.790966   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I1123 08:57:26.800082   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I1123 08:57:26.809033   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I1123 08:57:26.818403   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I1123 08:57:26.827424   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
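The -checkend 86400 runs above ask OpenSSL whether each control-plane certificate remains valid for at least the next 24 hours (exit status 0 if so, 1 if it would expire). A sketch of the same check with an explicit result:

    openssl x509 -noout -in /var/lib/minikube/certs/apiserver.crt -checkend 86400 \
      && echo 'valid for at least 24h' || echo 'expires within 24h'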
	I1123 08:57:26.836600   62034 kubeadm.go:401] StartCluster: {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34
.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNod
eRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:26.836750   62034 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I1123 08:57:26.857858   62034 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1123 08:57:26.872778   62034 kubeadm.go:417] found existing configuration files, will attempt cluster restart
	I1123 08:57:26.872804   62034 kubeadm.go:598] restartPrimaryControlPlane start ...
	I1123 08:57:26.872861   62034 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I1123 08:57:26.887408   62034 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I1123 08:57:26.888007   62034 kubeconfig.go:47] verify endpoint returned: get endpoint: "embed-certs-059363" does not appear in /home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:57:26.888341   62034 kubeconfig.go:62] /home/jenkins/minikube-integration/21966-18241/kubeconfig needs updating (will repair): [kubeconfig missing "embed-certs-059363" cluster setting kubeconfig missing "embed-certs-059363" context setting]
	I1123 08:57:26.888835   62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:26.917419   62034 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I1123 08:57:26.931495   62034 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.72.170
	I1123 08:57:26.931533   62034 kubeadm.go:1161] stopping kube-system containers ...
	I1123 08:57:26.931598   62034 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I1123 08:57:26.956424   62034 docker.go:484] Stopping containers: [2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58]
	I1123 08:57:26.956515   62034 ssh_runner.go:195] Run: docker stop 2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58
	I1123 08:57:26.982476   62034 ssh_runner.go:195] Run: sudo systemctl stop kubelet
	I1123 08:57:27.015459   62034 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1123 08:57:27.030576   62034 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I1123 08:57:27.030600   62034 kubeadm.go:158] found existing configuration files:
	
	I1123 08:57:27.030658   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I1123 08:57:27.043658   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I1123 08:57:27.043723   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I1123 08:57:27.058167   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I1123 08:57:27.074375   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I1123 08:57:27.074449   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I1123 08:57:27.091119   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I1123 08:57:27.106772   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I1123 08:57:27.106876   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I1123 08:57:27.124425   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I1123 08:57:27.140001   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I1123 08:57:27.140061   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I1123 08:57:27.154930   62034 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1123 08:57:27.169444   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:27.328883   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:24.261134   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:24.261787   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:24.261806   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:24.262181   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:24.262219   62386 retry.go:31] will retry after 1.137472458s: waiting for domain to come up
	I1123 08:57:25.401597   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:25.402373   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:25.402395   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:25.402716   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:25.402745   62386 retry.go:31] will retry after 1.246843188s: waiting for domain to come up
	I1123 08:57:26.651383   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:26.652402   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:26.652423   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:26.652983   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:26.653027   62386 retry.go:31] will retry after 1.576847177s: waiting for domain to come up
	I1123 08:57:28.231063   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:28.231892   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:28.231907   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:28.232342   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:28.232376   62386 retry.go:31] will retry after 2.191968701s: waiting for domain to come up
	I1123 08:57:29.072122   62034 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.743194687s)
	I1123 08:57:29.072199   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:29.363322   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:29.437121   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:29.519180   62034 api_server.go:52] waiting for apiserver process to appear ...
	I1123 08:57:29.519372   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:30.019409   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:30.519973   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:31.019428   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:31.127420   62034 api_server.go:72] duration metric: took 1.608256805s to wait for apiserver process to appear ...
	I1123 08:57:31.127455   62034 api_server.go:88] waiting for apiserver healthz status ...
	I1123 08:57:31.127480   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:31.128203   62034 api_server.go:269] stopped: https://192.168.72.170:8443/healthz: Get "https://192.168.72.170:8443/healthz": dial tcp 192.168.72.170:8443: connect: connection refused
	I1123 08:57:31.627812   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:30.426848   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:30.427811   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:30.427838   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:30.428254   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:30.428293   62386 retry.go:31] will retry after 2.66246372s: waiting for domain to come up
	I1123 08:57:33.093605   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:33.094467   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:33.094487   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:33.095017   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:33.095058   62386 retry.go:31] will retry after 2.368738453s: waiting for domain to come up
	I1123 08:57:34.364730   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W1123 08:57:34.364762   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I1123 08:57:34.364778   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:34.401309   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W1123 08:57:34.401349   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I1123 08:57:34.627677   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:34.639017   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
	[+]ping ok
	[+]log ok
	[-]etcd failed: reason withheld
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W1123 08:57:34.639052   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[-]etcd failed: reason withheld
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I1123 08:57:35.127669   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:35.133471   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W1123 08:57:35.133500   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I1123 08:57:35.628190   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:35.637607   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W1123 08:57:35.637636   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I1123 08:57:36.128401   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:36.134007   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
	ok
	I1123 08:57:36.142338   62034 api_server.go:141] control plane version: v1.34.1
	I1123 08:57:36.142374   62034 api_server.go:131] duration metric: took 5.014912025s to wait for apiserver health ...
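	Note: the healthz polling above (api_server.go:253/279) keeps requesting the endpoint until it returns 200 "ok", treating the intermediate 403 (RBAC not yet bootstrapped) and 500 (etcd/poststarthooks still pending) responses as "up but not ready". A minimal Go sketch of that pattern follows; it is an illustration only, not minikube's implementation, and the poll interval and overall timeout are assumed values.

	// Illustrative sketch: poll an apiserver /healthz endpoint until it returns
	// 200 "ok", roughly mirroring the api_server.go checks logged above.
	// The poll interval and timeout below are assumptions.
	package main

	import (
		"crypto/tls"
		"fmt"
		"io"
		"net/http"
		"time"
	)

	func waitForHealthz(url string, timeout time.Duration) error {
		client := &http.Client{
			Timeout: 2 * time.Second,
			// During bootstrap the prober has no CA loaded for the apiserver's
			// cert, so skip verification for this health probe only.
			Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
		}
		deadline := time.Now().Add(timeout)
		for time.Now().Before(deadline) {
			resp, err := client.Get(url)
			if err == nil {
				body, _ := io.ReadAll(resp.Body)
				resp.Body.Close()
				if resp.StatusCode == http.StatusOK {
					return nil // healthz returned 200 "ok"
				}
				// 403 or 500 means the server is up but not yet healthy; retry.
				fmt.Printf("healthz returned %d:\n%s\n", resp.StatusCode, body)
			}
			time.Sleep(500 * time.Millisecond)
		}
		return fmt.Errorf("apiserver did not report healthy within %s", timeout)
	}

	func main() {
		if err := waitForHealthz("https://192.168.72.170:8443/healthz", time.Minute); err != nil {
			fmt.Println(err)
		}
	}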
	I1123 08:57:36.142383   62034 cni.go:84] Creating CNI manager for ""
	I1123 08:57:36.142394   62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:36.144644   62034 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
	I1123 08:57:36.146156   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I1123 08:57:36.172405   62034 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I1123 08:57:36.206117   62034 system_pods.go:43] waiting for kube-system pods to appear ...
	I1123 08:57:36.212151   62034 system_pods.go:59] 8 kube-system pods found
	I1123 08:57:36.212192   62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I1123 08:57:36.212201   62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I1123 08:57:36.212209   62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I1123 08:57:36.212215   62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I1123 08:57:36.212219   62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
	I1123 08:57:36.212227   62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I1123 08:57:36.212254   62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1123 08:57:36.212263   62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
	I1123 08:57:36.212272   62034 system_pods.go:74] duration metric: took 6.125497ms to wait for pod list to return data ...
	I1123 08:57:36.212281   62034 node_conditions.go:102] verifying NodePressure condition ...
	I1123 08:57:36.216399   62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
	I1123 08:57:36.216437   62034 node_conditions.go:123] node cpu capacity is 2
	I1123 08:57:36.216455   62034 node_conditions.go:105] duration metric: took 4.163261ms to run NodePressure ...
	I1123 08:57:36.216523   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:36.499954   62034 kubeadm.go:729] waiting for restarted kubelet to initialise ...
	I1123 08:57:36.504225   62034 kubeadm.go:744] kubelet initialised
	I1123 08:57:36.504271   62034 kubeadm.go:745] duration metric: took 4.279186ms waiting for restarted kubelet to initialise ...
	I1123 08:57:36.504293   62034 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1123 08:57:36.525819   62034 ops.go:34] apiserver oom_adj: -16
	I1123 08:57:36.525847   62034 kubeadm.go:602] duration metric: took 9.653035112s to restartPrimaryControlPlane
	I1123 08:57:36.525859   62034 kubeadm.go:403] duration metric: took 9.689268169s to StartCluster
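	The oom_adj check logged just above (ops.go:34) is simply `cat /proc/$(pgrep kube-apiserver)/oom_adj` run on the node, which reports -16 for the apiserver. A small Go equivalent is sketched below; running it locally instead of over SSH is an assumption of the sketch.

	// Illustrative sketch: read the kube-apiserver's OOM score adjustment, the
	// same value ops.go logs above (-16). Mirrors the logged shell command but
	// runs locally on the node, which is an assumption of this sketch.
	package main

	import (
		"fmt"
		"os"
		"os/exec"
		"strings"
	)

	func apiserverOOMAdj() (string, error) {
		// pgrep -xnf: newest process whose full command line matches the
		// pattern, exactly as in the logged command.
		pid, err := exec.Command("pgrep", "-xnf", "kube-apiserver.*minikube.*").Output()
		if err != nil {
			return "", fmt.Errorf("kube-apiserver process not found: %w", err)
		}
		adj, err := os.ReadFile("/proc/" + strings.TrimSpace(string(pid)) + "/oom_adj")
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(adj)), nil
	}

	func main() {
		adj, err := apiserverOOMAdj()
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("apiserver oom_adj:", adj) // -16 in the run above
	}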
	I1123 08:57:36.525879   62034 settings.go:142] acquiring lock: {Name:mk0efabf238cb985c892ac3a9b32ac206b9f2336 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:36.525969   62034 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:57:36.527038   62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:36.527368   62034 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
	I1123 08:57:36.527458   62034 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I1123 08:57:36.527579   62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:36.527600   62034 addons.go:70] Setting metrics-server=true in profile "embed-certs-059363"
	I1123 08:57:36.527599   62034 addons.go:70] Setting default-storageclass=true in profile "embed-certs-059363"
	I1123 08:57:36.527579   62034 addons.go:70] Setting storage-provisioner=true in profile "embed-certs-059363"
	I1123 08:57:36.527644   62034 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "embed-certs-059363"
	I1123 08:57:36.527635   62034 addons.go:70] Setting dashboard=true in profile "embed-certs-059363"
	I1123 08:57:36.527665   62034 addons.go:239] Setting addon dashboard=true in "embed-certs-059363"
	W1123 08:57:36.527679   62034 addons.go:248] addon dashboard should already be in state true
	I1123 08:57:36.527666   62034 cache.go:107] acquiring lock: {Name:mk5578ff0020d8c222414769e0c7ca17014d52f1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1123 08:57:36.527671   62034 addons.go:239] Setting addon storage-provisioner=true in "embed-certs-059363"
	W1123 08:57:36.527702   62034 addons.go:248] addon storage-provisioner should already be in state true
	I1123 08:57:36.527733   62034 cache.go:115] /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
	I1123 08:57:36.527637   62034 addons.go:239] Setting addon metrics-server=true in "embed-certs-059363"
	I1123 08:57:36.527748   62034 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 96.823µs
	I1123 08:57:36.527758   62034 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
	I1123 08:57:36.527763   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.527766   62034 cache.go:87] Successfully saved all images to host disk.
	W1123 08:57:36.527758   62034 addons.go:248] addon metrics-server should already be in state true
	I1123 08:57:36.527796   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.527738   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.527934   62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:36.529271   62034 out.go:179] * Verifying Kubernetes components...
	I1123 08:57:36.530935   62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:36.531022   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:36.532294   62034 addons.go:239] Setting addon default-storageclass=true in "embed-certs-059363"
	W1123 08:57:36.532326   62034 addons.go:248] addon default-storageclass should already be in state true
	I1123 08:57:36.532348   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.533191   62034 out.go:179]   - Using image registry.k8s.io/echoserver:1.4
	I1123 08:57:36.533215   62034 out.go:179]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I1123 08:57:36.533195   62034 out.go:179]   - Using image fake.domain/registry.k8s.io/echoserver:1.4
	I1123 08:57:36.534073   62034 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
	I1123 08:57:36.534091   62034 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I1123 08:57:36.534667   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.535129   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.535347   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.535858   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.536061   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I1123 08:57:36.536084   62034 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I1123 08:57:36.536132   62034 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I1123 08:57:36.536145   62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I1123 08:57:36.536880   62034 out.go:179]   - Using image docker.io/kubernetesui/dashboard:v2.7.0
	I1123 08:57:36.537788   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.538214   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
	I1123 08:57:36.538249   62034 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
	I1123 08:57:36.538746   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.538816   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.539088   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.540090   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.540146   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.541026   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.541069   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.541120   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.541158   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.541257   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.541514   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.542423   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.542896   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.542931   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.543116   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.844170   62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:57:36.869742   62034 node_ready.go:35] waiting up to 6m0s for node "embed-certs-059363" to be "Ready" ...
	I1123 08:57:36.960323   62034 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:57:36.960371   62034 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:57:36.960379   62034 cache_images.go:264] succeeded pushing to: embed-certs-059363
	I1123 08:57:37.000609   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I1123 08:57:37.008492   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I1123 08:57:37.017692   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I1123 08:57:37.017713   62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
	I1123 08:57:37.020529   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
	I1123 08:57:37.020561   62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
	I1123 08:57:37.074670   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
	I1123 08:57:37.074710   62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
	I1123 08:57:37.076076   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I1123 08:57:37.076096   62034 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I1123 08:57:37.132446   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I1123 08:57:37.132466   62034 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I1123 08:57:37.134322   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
	I1123 08:57:37.134339   62034 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
	I1123 08:57:37.188291   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
	I1123 08:57:37.188311   62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
	I1123 08:57:37.200924   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I1123 08:57:37.265084   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
	I1123 08:57:37.265109   62034 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
	I1123 08:57:37.341532   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
	I1123 08:57:37.341559   62034 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
	I1123 08:57:37.425079   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
	I1123 08:57:37.425110   62034 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
	I1123 08:57:37.510704   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
	I1123 08:57:37.510748   62034 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
	I1123 08:57:37.600957   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
	I1123 08:57:37.600982   62034 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
	I1123 08:57:37.663098   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I1123 08:57:38.728547   62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.720019273s)
	I1123 08:57:38.824306   62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.623332944s)
	I1123 08:57:38.824374   62034 addons.go:495] Verifying addon metrics-server=true in "embed-certs-059363"
	W1123 08:57:38.886375   62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
	I1123 08:57:39.122207   62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.459038888s)
	I1123 08:57:39.124248   62034 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
	
		minikube -p embed-certs-059363 addons enable metrics-server
	
	I1123 08:57:39.126125   62034 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
	I1123 08:57:35.465742   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:35.466525   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:35.466540   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:35.467003   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:35.467033   62386 retry.go:31] will retry after 4.454598391s: waiting for domain to come up
	I1123 08:57:42.467134   62480 start.go:364] duration metric: took 25.46601127s to acquireMachinesLock for "default-k8s-diff-port-925051"
	I1123 08:57:42.467190   62480 start.go:96] Skipping create...Using existing machine configuration
	I1123 08:57:42.467196   62480 fix.go:54] fixHost starting: 
	I1123 08:57:42.469900   62480 fix.go:112] recreateIfNeeded on default-k8s-diff-port-925051: state=Stopped err=<nil>
	W1123 08:57:42.469946   62480 fix.go:138] unexpected machine state, will restart: <nil>
	I1123 08:57:39.127521   62034 addons.go:530] duration metric: took 2.600069679s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
	W1123 08:57:41.375432   62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
	I1123 08:57:39.922903   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:39.923713   62386 main.go:143] libmachine: domain newest-cni-078196 has current primary IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:39.923726   62386 main.go:143] libmachine: found domain IP: 192.168.39.87
	I1123 08:57:39.923732   62386 main.go:143] libmachine: reserving static IP address...
	I1123 08:57:39.924129   62386 main.go:143] libmachine: unable to find host DHCP lease matching {name: "newest-cni-078196", mac: "52:54:00:d7:c1:0d", ip: "192.168.39.87"} in network mk-newest-cni-078196
	I1123 08:57:40.154544   62386 main.go:143] libmachine: reserved static IP address 192.168.39.87 for domain newest-cni-078196
	I1123 08:57:40.154569   62386 main.go:143] libmachine: waiting for SSH...
	I1123 08:57:40.154577   62386 main.go:143] libmachine: Getting to WaitForSSH function...
	I1123 08:57:40.157877   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.158255   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:minikube Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.158277   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.158452   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.158677   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.158690   62386 main.go:143] libmachine: About to run SSH command:
	exit 0
	I1123 08:57:40.266068   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:40.266484   62386 main.go:143] libmachine: domain creation complete
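	The WaitForSSH step above simply retries running `exit 0` over SSH until it succeeds, at which point the machine is considered reachable. A minimal sketch of that pattern using golang.org/x/crypto/ssh follows; the host, user, and key path come from the log, while the retry interval and attempt count are assumptions, and this is not the libmachine code itself.

	// Illustrative sketch: wait for a VM's sshd by retrying "exit 0" until it
	// succeeds, as the WaitForSSH step above does. Retry interval and attempt
	// count are assumptions.
	package main

	import (
		"fmt"
		"os"
		"time"

		"golang.org/x/crypto/ssh"
	)

	func waitForSSH(addr, user, keyPath string, attempts int) error {
		key, err := os.ReadFile(keyPath)
		if err != nil {
			return err
		}
		signer, err := ssh.ParsePrivateKey(key)
		if err != nil {
			return err
		}
		cfg := &ssh.ClientConfig{
			User:            user,
			Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
			HostKeyCallback: ssh.InsecureIgnoreHostKey(), // fresh test VM, host key unknown
			Timeout:         5 * time.Second,
		}
		for i := 0; i < attempts; i++ {
			if client, derr := ssh.Dial("tcp", addr, cfg); derr == nil {
				session, serr := client.NewSession()
				if serr == nil {
					rerr := session.Run("exit 0")
					session.Close()
					client.Close()
					if rerr == nil {
						return nil // sshd is up and executing commands
					}
				} else {
					client.Close()
				}
			}
			time.Sleep(2 * time.Second)
		}
		return fmt.Errorf("SSH on %s did not come up after %d attempts", addr, attempts)
	}

	func main() {
		err := waitForSSH("192.168.39.87:22", "docker",
			"/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa", 30)
		fmt.Println(err)
	}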
	I1123 08:57:40.268135   62386 machine.go:94] provisionDockerMachine start ...
	I1123 08:57:40.270701   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.271083   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.271106   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.271243   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.271436   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.271446   62386 main.go:143] libmachine: About to run SSH command:
	hostname
	I1123 08:57:40.377718   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
	
	I1123 08:57:40.377749   62386 buildroot.go:166] provisioning hostname "newest-cni-078196"
	I1123 08:57:40.381682   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.382224   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.382274   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.382549   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.382750   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.382763   62386 main.go:143] libmachine: About to run SSH command:
	sudo hostname newest-cni-078196 && echo "newest-cni-078196" | sudo tee /etc/hostname
	I1123 08:57:40.510920   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: newest-cni-078196
	
	I1123 08:57:40.514470   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.514870   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.514901   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.515119   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.515349   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.515373   62386 main.go:143] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\snewest-cni-078196' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-078196/g' /etc/hosts;
				else 
					echo '127.0.1.1 newest-cni-078196' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1123 08:57:40.644008   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:40.644045   62386 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
	I1123 08:57:40.644119   62386 buildroot.go:174] setting up certificates
	I1123 08:57:40.644132   62386 provision.go:84] configureAuth start
	I1123 08:57:40.647940   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.648462   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.648495   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.651488   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.651967   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.652002   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.652153   62386 provision.go:143] copyHostCerts
	I1123 08:57:40.652210   62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
	I1123 08:57:40.652252   62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
	I1123 08:57:40.652340   62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
	I1123 08:57:40.652511   62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
	I1123 08:57:40.652528   62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
	I1123 08:57:40.652580   62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
	I1123 08:57:40.652714   62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
	I1123 08:57:40.652735   62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
	I1123 08:57:40.652778   62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
	I1123 08:57:40.652872   62386 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.newest-cni-078196 san=[127.0.0.1 192.168.39.87 localhost minikube newest-cni-078196]
	I1123 08:57:40.723606   62386 provision.go:177] copyRemoteCerts
	I1123 08:57:40.723663   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1123 08:57:40.726615   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.727086   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.727115   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.727301   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:40.819420   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I1123 08:57:40.852505   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
	I1123 08:57:40.888555   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I1123 08:57:40.923977   62386 provision.go:87] duration metric: took 279.828188ms to configureAuth
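	The configureAuth step above generates a server certificate signed by the profile's CA, with the SANs listed in the provision.go:117 line (127.0.0.1, 192.168.39.87, localhost, minikube, newest-cni-078196), and copyRemoteCerts then scp's the resulting files to /etc/docker on the VM. The sketch below shows that kind of CA-signed server-cert issuance with Go's crypto/x509; it is illustrative only (not minikube's provision code), and the RSA key size, PKCS#1 CA key encoding, validity period, and file paths are all assumptions.

	// Illustrative sketch: issue a TLS server certificate signed by an existing
	// CA with the SANs shown in the provision.go log above. Key size, validity,
	// CA key encoding (PKCS#1 RSA), and file paths are assumptions.
	package main

	import (
		"crypto/rand"
		"crypto/rsa"
		"crypto/x509"
		"crypto/x509/pkix"
		"encoding/pem"
		"errors"
		"log"
		"math/big"
		"net"
		"os"
		"time"
	)

	func loadCA(certPath, keyPath string) (*x509.Certificate, *rsa.PrivateKey, error) {
		certPEM, err := os.ReadFile(certPath)
		if err != nil {
			return nil, nil, err
		}
		keyPEM, err := os.ReadFile(keyPath)
		if err != nil {
			return nil, nil, err
		}
		certBlock, _ := pem.Decode(certPEM)
		keyBlock, _ := pem.Decode(keyPEM)
		if certBlock == nil || keyBlock == nil {
			return nil, nil, errors.New("could not decode CA PEM data")
		}
		cert, err := x509.ParseCertificate(certBlock.Bytes)
		if err != nil {
			return nil, nil, err
		}
		key, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) // assumes an RSA PKCS#1 CA key
		if err != nil {
			return nil, nil, err
		}
		return cert, key, nil
	}

	func issueServerCert(caCert *x509.Certificate, caKey *rsa.PrivateKey) error {
		key, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			return err
		}
		tmpl := &x509.Certificate{
			SerialNumber: big.NewInt(time.Now().UnixNano()),
			Subject:      pkix.Name{Organization: []string{"jenkins.newest-cni-078196"}},
			NotBefore:    time.Now(),
			NotAfter:     time.Now().AddDate(10, 0, 0),
			KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
			ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
			// SANs from the log line above: the IPs and names the cert must cover.
			IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.39.87")},
			DNSNames:    []string{"localhost", "minikube", "newest-cni-078196"},
		}
		der, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, &key.PublicKey, caKey)
		if err != nil {
			return err
		}
		if err := os.WriteFile("server.pem",
			pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), 0o644); err != nil {
			return err
		}
		return os.WriteFile("server-key.pem",
			pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}), 0o600)
	}

	func main() {
		caCert, caKey, err := loadCA("ca.pem", "ca-key.pem") // placeholder paths
		if err != nil {
			log.Fatal(err)
		}
		if err := issueServerCert(caCert, caKey); err != nil {
			log.Fatal(err)
		}
	}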
	I1123 08:57:40.924014   62386 buildroot.go:189] setting minikube options for container-runtime
	I1123 08:57:40.924275   62386 config.go:182] Loaded profile config "newest-cni-078196": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:40.927517   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.927915   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.927938   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.928098   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.928391   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.928404   62386 main.go:143] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I1123 08:57:41.042673   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
	
	I1123 08:57:41.042707   62386 buildroot.go:70] root file system type: tmpfs
	I1123 08:57:41.042873   62386 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I1123 08:57:41.046445   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.046989   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:41.047094   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.047391   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:41.047683   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:41.047769   62386 main.go:143] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I1123 08:57:41.175224   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I1123 08:57:41.178183   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.178676   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:41.178702   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.178902   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:41.179152   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:41.179171   62386 main.go:143] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I1123 08:57:42.186295   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
	Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
	
	I1123 08:57:42.186331   62386 machine.go:97] duration metric: took 1.918179804s to provisionDockerMachine
	I1123 08:57:42.186347   62386 client.go:176] duration metric: took 22.997600307s to LocalClient.Create
	I1123 08:57:42.186371   62386 start.go:167] duration metric: took 22.997685492s to libmachine.API.Create "newest-cni-078196"
	I1123 08:57:42.186382   62386 start.go:293] postStartSetup for "newest-cni-078196" (driver="kvm2")
	I1123 08:57:42.186396   62386 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1123 08:57:42.186475   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1123 08:57:42.189917   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.190351   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.190388   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.190560   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:42.283393   62386 ssh_runner.go:195] Run: cat /etc/os-release
	I1123 08:57:42.289999   62386 info.go:137] Remote host: Buildroot 2025.02
	I1123 08:57:42.290030   62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
	I1123 08:57:42.290117   62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
	I1123 08:57:42.290218   62386 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
	I1123 08:57:42.290354   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1123 08:57:42.306924   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:42.343081   62386 start.go:296] duration metric: took 156.683452ms for postStartSetup
	I1123 08:57:42.347012   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.347579   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.347619   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.347939   62386 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/config.json ...
	I1123 08:57:42.348140   62386 start.go:128] duration metric: took 23.161911818s to createHost
	I1123 08:57:42.350835   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.351301   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.351336   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.351513   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:42.351791   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:42.351806   62386 main.go:143] libmachine: About to run SSH command:
	date +%s.%N
	I1123 08:57:42.466967   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888262.440217357
	
	I1123 08:57:42.466993   62386 fix.go:216] guest clock: 1763888262.440217357
	I1123 08:57:42.467001   62386 fix.go:229] Guest: 2025-11-23 08:57:42.440217357 +0000 UTC Remote: 2025-11-23 08:57:42.348151583 +0000 UTC m=+33.279616417 (delta=92.065774ms)
	I1123 08:57:42.467025   62386 fix.go:200] guest clock delta is within tolerance: 92.065774ms
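The skew check above runs `date +%s.%N` on the guest and compares the result with the host-side timestamp recorded just before. A minimal sketch of that comparison using the exact values from the log (the one-second tolerance is an assumption for illustration; the log does not state minikube's threshold):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// guestClockDelta parses the "seconds.nanoseconds" string printed by
// `date +%s.%N` (nanoseconds are zero-padded to 9 digits) and returns
// its offset from the given host-side time.
func guestClockDelta(guestOut string, host time.Time) (time.Duration, error) {
	parts := strings.SplitN(strings.TrimSpace(guestOut), ".", 2)
	sec, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, err
	}
	var nsec int64
	if len(parts) == 2 {
		if nsec, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
			return 0, err
		}
	}
	return time.Unix(sec, nsec).Sub(host), nil
}

func main() {
	// Values taken from the log lines above.
	host := time.Date(2025, 11, 23, 8, 57, 42, 348151583, time.UTC)
	delta, _ := guestClockDelta("1763888262.440217357", host)
	const tolerance = time.Second // assumed threshold, for illustration only
	fmt.Printf("delta=%v within %v: %v\n", delta, tolerance, delta < tolerance && delta > -tolerance)
	// Prints delta=92.065774ms, matching the log.
}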
	I1123 08:57:42.467033   62386 start.go:83] releasing machines lock for "newest-cni-078196", held for 23.280957089s
	I1123 08:57:42.471032   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.471501   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.471531   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.472456   62386 ssh_runner.go:195] Run: cat /version.json
	I1123 08:57:42.472536   62386 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1123 08:57:42.477011   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.477058   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.477612   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.477644   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.479664   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.479706   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.480287   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:42.480869   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:42.593772   62386 ssh_runner.go:195] Run: systemctl --version
	I1123 08:57:42.603410   62386 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	W1123 08:57:42.614510   62386 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
	I1123 08:57:42.614601   62386 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1123 08:57:42.645967   62386 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I1123 08:57:42.646003   62386 start.go:496] detecting cgroup driver to use...
	I1123 08:57:42.646138   62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:42.678706   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I1123 08:57:42.694705   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1123 08:57:42.713341   62386 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I1123 08:57:42.713419   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1123 08:57:42.729085   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:42.747983   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1123 08:57:42.768036   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:42.784061   62386 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1123 08:57:42.803711   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1123 08:57:42.822385   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I1123 08:57:42.837748   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
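The run of sed commands above rewrites /etc/containerd/config.toml so that containerd would use the cgroupfs cgroup driver, the runc v2 runtime, the pinned pause image, and the expected CNI conf_dir. A rough Go equivalent of the two key substitutions, applied to an illustrative config fragment rather than the guest's real file:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Minimal illustrative fragment of a containerd config; not the guest's actual file.
	conf := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true

[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.k8s.io/pause:3.9"`

	// Same intent as: sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g'
	conf = regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`).
		ReplaceAllString(conf, "${1}SystemdCgroup = false")
	// Same intent as: sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|'
	conf = regexp.MustCompile(`(?m)^( *)sandbox_image = .*$`).
		ReplaceAllString(conf, `${1}sandbox_image = "registry.k8s.io/pause:3.10.1"`)

	fmt.Println(conf)
}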
	I1123 08:57:42.858942   62386 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1123 08:57:42.873841   62386 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
	stdout:
	
	stderr:
	sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
	I1123 08:57:42.873924   62386 ssh_runner.go:195] Run: sudo modprobe br_netfilter
	I1123 08:57:42.888503   62386 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
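Because the br_netfilter module is not loaded yet, the sysctl probe above fails with "cannot stat", so the runner falls back to modprobe br_netfilter and then enables IPv4 forwarding. A small sketch of that probe-then-load fallback (must run as root; written for illustration, not taken from minikube's source):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// This path only exists once the br_netfilter module is loaded.
	const key = "/proc/sys/net/bridge/bridge-nf-call-iptables"

	if _, err := os.Stat(key); err != nil {
		fmt.Println("bridge-nf-call-iptables not present, loading br_netfilter:", err)
		if out, err := exec.Command("modprobe", "br_netfilter").CombinedOutput(); err != nil {
			fmt.Printf("modprobe failed: %v\n%s", err, out)
			return
		}
	}

	// Equivalent of: echo 1 > /proc/sys/net/ipv4/ip_forward
	if err := os.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte("1\n"), 0o644); err != nil {
		fmt.Println("enabling ip_forward failed (needs root):", err)
	}
}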
	I1123 08:57:42.902894   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:43.087215   62386 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1123 08:57:43.137011   62386 start.go:496] detecting cgroup driver to use...
	I1123 08:57:43.137115   62386 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I1123 08:57:43.166541   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:43.198142   62386 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I1123 08:57:43.220890   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:43.239791   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:43.260304   62386 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1123 08:57:43.296702   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:43.316993   62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:43.348493   62386 ssh_runner.go:195] Run: which cri-dockerd
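Having settled on the docker runtime, the step above rewrites /etc/crictl.yaml so crictl talks to cri-dockerd's socket instead of containerd's, then locates the cri-dockerd binary. A tiny sketch producing the same file contents (written to a scratch path here; the real step pipes through sudo tee on the guest):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Same content the "printf %s ... | sudo tee /etc/crictl.yaml" step writes.
	const crictlYAML = "runtime-endpoint: unix:///var/run/cri-dockerd.sock\n"

	path := "/tmp/crictl.yaml" // scratch location for illustration; the provisioner writes /etc/crictl.yaml as root
	if err := os.WriteFile(path, []byte(crictlYAML), 0o644); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("wrote", path)
}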
	I1123 08:57:43.353715   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I1123 08:57:43.367872   62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I1123 08:57:43.391806   62386 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I1123 08:57:43.570922   62386 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I1123 08:57:43.771497   62386 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I1123 08:57:43.771641   62386 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I1123 08:57:43.796840   62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:57:43.815699   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:43.997592   62386 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:57:44.541819   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1123 08:57:44.559735   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I1123 08:57:44.577562   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:44.595133   62386 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I1123 08:57:44.759253   62386 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I1123 08:57:44.927897   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:45.126443   62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I1123 08:57:45.161272   62386 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:45.179561   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:45.365439   62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:45.512591   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:45.537318   62386 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I1123 08:57:45.537393   62386 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I1123 08:57:45.546577   62386 start.go:564] Will wait 60s for crictl version
	I1123 08:57:45.546657   62386 ssh_runner.go:195] Run: which crictl
	I1123 08:57:45.553243   62386 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1123 08:57:45.597074   62386 start.go:580] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.5.1
	RuntimeApiVersion:  v1
	I1123 08:57:45.597163   62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:45.640023   62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:45.668409   62386 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
	I1123 08:57:45.671742   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:45.672152   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:45.672174   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:45.672386   62386 ssh_runner.go:195] Run: grep 192.168.39.1	host.minikube.internal$ /etc/hosts
	I1123 08:57:45.677208   62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.39.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
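The /etc/hosts update above is deliberately idempotent: it filters out any existing host.minikube.internal line, appends a fresh "IP<TAB>name" entry, and copies the result back; the same pattern is reused later for control-plane.minikube.internal. A sketch of that filter-then-append logic (function name invented for illustration):

package main

import (
	"fmt"
	"os"
	"strings"
)

// ensureHostsEntry mirrors the grep -v + echo + cp pipeline above: any existing
// line for the name is dropped, then exactly one fresh "ip<TAB>name" line is appended.
func ensureHostsEntry(hosts, ip, name string) string {
	var kept []string
	for _, line := range strings.Split(hosts, "\n") {
		if strings.HasSuffix(line, "\t"+name) {
			continue // drop stale entries for this name
		}
		kept = append(kept, line)
	}
	return strings.Join(kept, "\n") + fmt.Sprintf("\n%s\t%s\n", ip, name)
}

func main() {
	in, _ := os.ReadFile("/etc/hosts")
	fmt.Print(ensureHostsEntry(strings.TrimRight(string(in), "\n"), "192.168.39.1", "host.minikube.internal"))
}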
	I1123 08:57:45.697750   62386 out.go:179]   - kubeadm.pod-network-cidr=10.42.0.0/16
	I1123 08:57:42.471379   62480 out.go:252] * Restarting existing kvm2 VM for "default-k8s-diff-port-925051" ...
	I1123 08:57:42.471439   62480 main.go:143] libmachine: starting domain...
	I1123 08:57:42.471451   62480 main.go:143] libmachine: ensuring networks are active...
	I1123 08:57:42.472371   62480 main.go:143] libmachine: Ensuring network default is active
	I1123 08:57:42.473208   62480 main.go:143] libmachine: Ensuring network mk-default-k8s-diff-port-925051 is active
	I1123 08:57:42.474158   62480 main.go:143] libmachine: getting domain XML...
	I1123 08:57:42.476521   62480 main.go:143] libmachine: starting domain XML:
	<domain type='kvm'>
	  <name>default-k8s-diff-port-925051</name>
	  <uuid>faa8704c-25e4-4eae-b827-cb508c4f9f54</uuid>
	  <memory unit='KiB'>3145728</memory>
	  <currentMemory unit='KiB'>3145728</currentMemory>
	  <vcpu placement='static'>2</vcpu>
	  <os>
	    <type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
	    <boot dev='cdrom'/>
	    <boot dev='hd'/>
	    <bootmenu enable='no'/>
	  </os>
	  <features>
	    <acpi/>
	    <apic/>
	    <pae/>
	  </features>
	  <cpu mode='host-passthrough' check='none' migratable='on'/>
	  <clock offset='utc'/>
	  <on_poweroff>destroy</on_poweroff>
	  <on_reboot>restart</on_reboot>
	  <on_crash>destroy</on_crash>
	  <devices>
	    <emulator>/usr/bin/qemu-system-x86_64</emulator>
	    <disk type='file' device='cdrom'>
	      <driver name='qemu' type='raw'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/boot2docker.iso'/>
	      <target dev='hdc' bus='scsi'/>
	      <readonly/>
	      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
	    </disk>
	    <disk type='file' device='disk'>
	      <driver name='qemu' type='raw' io='threads'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/default-k8s-diff-port-925051.rawdisk'/>
	      <target dev='hda' bus='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
	    </disk>
	    <controller type='usb' index='0' model='piix3-uhci'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
	    </controller>
	    <controller type='pci' index='0' model='pci-root'/>
	    <controller type='scsi' index='0' model='lsilogic'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
	    </controller>
	    <interface type='network'>
	      <mac address='52:54:00:19:c7:db'/>
	      <source network='mk-default-k8s-diff-port-925051'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
	    </interface>
	    <interface type='network'>
	      <mac address='52:54:00:fd:c0:c5'/>
	      <source network='default'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
	    </interface>
	    <serial type='pty'>
	      <target type='isa-serial' port='0'>
	        <model name='isa-serial'/>
	      </target>
	    </serial>
	    <console type='pty'>
	      <target type='serial' port='0'/>
	    </console>
	    <input type='mouse' bus='ps2'/>
	    <input type='keyboard' bus='ps2'/>
	    <audio id='1' type='none'/>
	    <memballoon model='virtio'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
	    </memballoon>
	    <rng model='virtio'>
	      <backend model='random'>/dev/random</backend>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
	    </rng>
	  </devices>
	</domain>
	
	I1123 08:57:44.035948   62480 main.go:143] libmachine: waiting for domain to start...
	I1123 08:57:44.037946   62480 main.go:143] libmachine: domain is now running
	I1123 08:57:44.037965   62480 main.go:143] libmachine: waiting for IP...
	I1123 08:57:44.039014   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.039860   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has current primary IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.039874   62480 main.go:143] libmachine: found domain IP: 192.168.83.137
	I1123 08:57:44.039880   62480 main.go:143] libmachine: reserving static IP address...
	I1123 08:57:44.040364   62480 main.go:143] libmachine: found host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:57:44.040404   62480 main.go:143] libmachine: skip adding static IP to network mk-default-k8s-diff-port-925051 - found existing host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"}
	I1123 08:57:44.040416   62480 main.go:143] libmachine: reserved static IP address 192.168.83.137 for domain default-k8s-diff-port-925051
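The IP discovery above works by taking the primary NIC's MAC address from the domain XML printed earlier and matching it against the DHCP leases of the mk-default-k8s-diff-port-925051 network. A hedged sketch of extracting those interface MACs from a trimmed copy of that XML (not minikube's actual parser):

package main

import (
	"encoding/xml"
	"fmt"
)

// domain models only the parts of the libvirt domain XML needed here.
type domain struct {
	Interfaces []struct {
		MAC struct {
			Address string `xml:"address,attr"`
		} `xml:"mac"`
		Source struct {
			Network string `xml:"network,attr"`
		} `xml:"source"`
	} `xml:"devices>interface"`
}

func main() {
	// Trimmed-down copy of the XML printed above (two virtio NICs).
	const domXML = `<domain type='kvm'>
  <devices>
    <interface type='network'>
      <mac address='52:54:00:19:c7:db'/>
      <source network='mk-default-k8s-diff-port-925051'/>
    </interface>
    <interface type='network'>
      <mac address='52:54:00:fd:c0:c5'/>
      <source network='default'/>
    </interface>
  </devices>
</domain>`

	var d domain
	if err := xml.Unmarshal([]byte(domXML), &d); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, iface := range d.Interfaces {
		fmt.Printf("network %-35s mac %s\n", iface.Source.Network, iface.MAC.Address)
	}
}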
	I1123 08:57:44.040421   62480 main.go:143] libmachine: waiting for SSH...
	I1123 08:57:44.040425   62480 main.go:143] libmachine: Getting to WaitForSSH function...
	I1123 08:57:44.043072   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.043526   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:57:44.043551   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.043747   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:44.044097   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:57:44.044119   62480 main.go:143] libmachine: About to run SSH command:
	exit 0
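The "exit 0" probe above is retried until the guest's sshd answers; the later "dial tcp 192.168.83.137:22 ... no route to host" line for this VM is one such failed attempt. A minimal retry sketch, using a plain TCP dial as a stand-in for the real SSH session (timeouts are illustrative):

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForSSH polls the guest's SSH port until it accepts a TCP connection or the
// deadline passes. The real driver then runs "exit 0" over an SSH session; a bare
// TCP dial is used here only as a stand-in.
func waitForSSH(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		fmt.Println("not ready yet:", err) // e.g. "connect: no route to host"
		time.Sleep(3 * time.Second)
	}
	return fmt.Errorf("ssh on %s not reachable within %v", addr, timeout)
}

func main() {
	if err := waitForSSH("192.168.83.137:22", time.Minute); err != nil {
		fmt.Println(err)
	}
}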
	W1123 08:57:43.874417   62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
	I1123 08:57:44.875063   62034 node_ready.go:49] node "embed-certs-059363" is "Ready"
	I1123 08:57:44.875101   62034 node_ready.go:38] duration metric: took 8.005319911s for node "embed-certs-059363" to be "Ready" ...
	I1123 08:57:44.875126   62034 api_server.go:52] waiting for apiserver process to appear ...
	I1123 08:57:44.875194   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:44.908964   62034 api_server.go:72] duration metric: took 8.381553502s to wait for apiserver process to appear ...
	I1123 08:57:44.908993   62034 api_server.go:88] waiting for apiserver healthz status ...
	I1123 08:57:44.909013   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:44.924580   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
	ok
	I1123 08:57:44.927212   62034 api_server.go:141] control plane version: v1.34.1
	I1123 08:57:44.927254   62034 api_server.go:131] duration metric: took 18.252447ms to wait for apiserver health ...
	I1123 08:57:44.927266   62034 system_pods.go:43] waiting for kube-system pods to appear ...
	I1123 08:57:44.936682   62034 system_pods.go:59] 8 kube-system pods found
	I1123 08:57:44.936719   62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
	I1123 08:57:44.936727   62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
	I1123 08:57:44.936746   62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I1123 08:57:44.936754   62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
	I1123 08:57:44.936762   62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
	I1123 08:57:44.936772   62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I1123 08:57:44.936780   62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1123 08:57:44.936786   62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
	I1123 08:57:44.936794   62034 system_pods.go:74] duration metric: took 9.520766ms to wait for pod list to return data ...
	I1123 08:57:44.936804   62034 default_sa.go:34] waiting for default service account to be created ...
	I1123 08:57:44.948188   62034 default_sa.go:45] found service account: "default"
	I1123 08:57:44.948225   62034 default_sa.go:55] duration metric: took 11.401143ms for default service account to be created ...
	I1123 08:57:44.948255   62034 system_pods.go:116] waiting for k8s-apps to be running ...
	I1123 08:57:44.951719   62034 system_pods.go:86] 8 kube-system pods found
	I1123 08:57:44.951754   62034 system_pods.go:89] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
	I1123 08:57:44.951774   62034 system_pods.go:89] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
	I1123 08:57:44.951787   62034 system_pods.go:89] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I1123 08:57:44.951803   62034 system_pods.go:89] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
	I1123 08:57:44.951812   62034 system_pods.go:89] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
	I1123 08:57:44.951821   62034 system_pods.go:89] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I1123 08:57:44.951837   62034 system_pods.go:89] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1123 08:57:44.951850   62034 system_pods.go:89] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
	I1123 08:57:44.951862   62034 system_pods.go:126] duration metric: took 3.598572ms to wait for k8s-apps to be running ...
	I1123 08:57:44.951872   62034 system_svc.go:44] waiting for kubelet service to be running ....
	I1123 08:57:44.951940   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:57:44.981007   62034 system_svc.go:56] duration metric: took 29.122206ms WaitForService to wait for kubelet
	I1123 08:57:44.981059   62034 kubeadm.go:587] duration metric: took 8.453653674s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1123 08:57:44.981082   62034 node_conditions.go:102] verifying NodePressure condition ...
	I1123 08:57:44.985604   62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
	I1123 08:57:44.985627   62034 node_conditions.go:123] node cpu capacity is 2
	I1123 08:57:44.985639   62034 node_conditions.go:105] duration metric: took 4.549928ms to run NodePressure ...
	I1123 08:57:44.985653   62034 start.go:242] waiting for startup goroutines ...
	I1123 08:57:44.985663   62034 start.go:247] waiting for cluster config update ...
	I1123 08:57:44.985678   62034 start.go:256] writing updated cluster config ...
	I1123 08:57:44.986007   62034 ssh_runner.go:195] Run: rm -f paused
	I1123 08:57:44.992429   62034 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I1123 08:57:44.997825   62034 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.005294   62034 pod_ready.go:94] pod "coredns-66bc5c9577-665gz" is "Ready"
	I1123 08:57:45.005321   62034 pod_ready.go:86] duration metric: took 7.470836ms for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.008602   62034 pod_ready.go:83] waiting for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.017355   62034 pod_ready.go:94] pod "etcd-embed-certs-059363" is "Ready"
	I1123 08:57:45.017385   62034 pod_ready.go:86] duration metric: took 8.758566ms for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.020737   62034 pod_ready.go:83] waiting for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	W1123 08:57:47.036716   62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
	I1123 08:57:45.699160   62386 kubeadm.go:884] updating cluster {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I1123 08:57:45.699335   62386 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:57:45.699438   62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:45.722240   62386 docker.go:691] Got preloaded images: 
	I1123 08:57:45.722266   62386 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.1 wasn't preloaded
	I1123 08:57:45.722318   62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
	I1123 08:57:45.737539   62386 ssh_runner.go:195] Run: which lz4
	I1123 08:57:45.742521   62386 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
	I1123 08:57:45.748122   62386 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/preloaded.tar.lz4': No such file or directory
	I1123 08:57:45.748156   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (353378914 bytes)
	I1123 08:57:47.397908   62386 docker.go:655] duration metric: took 1.655425847s to copy over tarball
	I1123 08:57:47.398050   62386 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
	I1123 08:57:49.041182   62386 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.643095229s)
	I1123 08:57:49.041212   62386 ssh_runner.go:146] rm: /preloaded.tar.lz4
	I1123 08:57:49.084378   62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
	I1123 08:57:49.103760   62386 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
	W1123 08:57:49.601859   62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
	I1123 08:57:50.104106   62034 pod_ready.go:94] pod "kube-apiserver-embed-certs-059363" is "Ready"
	I1123 08:57:50.104158   62034 pod_ready.go:86] duration metric: took 5.08337291s for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.107546   62034 pod_ready.go:83] waiting for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.115455   62034 pod_ready.go:94] pod "kube-controller-manager-embed-certs-059363" is "Ready"
	I1123 08:57:50.115500   62034 pod_ready.go:86] duration metric: took 7.928459ms for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.119972   62034 pod_ready.go:83] waiting for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.127595   62034 pod_ready.go:94] pod "kube-proxy-sjvcr" is "Ready"
	I1123 08:57:50.127628   62034 pod_ready.go:86] duration metric: took 7.626091ms for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.773984   62034 pod_ready.go:83] waiting for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.786424   62034 pod_ready.go:94] pod "kube-scheduler-embed-certs-059363" is "Ready"
	I1123 08:57:50.786450   62034 pod_ready.go:86] duration metric: took 12.434457ms for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.786464   62034 pod_ready.go:40] duration metric: took 5.79400818s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I1123 08:57:50.838926   62034 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
	I1123 08:57:50.918780   62034 out.go:179] * Done! kubectl is now configured to use "embed-certs-059363" cluster and "default" namespace by default
	I1123 08:57:47.146461   62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
	I1123 08:57:49.133800   62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:57:49.157740   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:49.330628   62386 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:57:52.066864   62386 ssh_runner.go:235] Completed: sudo systemctl restart docker: (2.736192658s)
	I1123 08:57:52.066973   62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:52.092926   62386 docker.go:691] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I1123 08:57:52.092950   62386 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:57:52.092962   62386 kubeadm.go:935] updating node { 192.168.39.87 8443 v1.34.1 docker true true} ...
	I1123 08:57:52.093116   62386 kubeadm.go:947] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=newest-cni-078196 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.87
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I1123 08:57:52.093201   62386 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I1123 08:57:52.154769   62386 cni.go:84] Creating CNI manager for ""
	I1123 08:57:52.154816   62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:52.154857   62386 kubeadm.go:85] Using pod CIDR: 10.42.0.0/16
	I1123 08:57:52.154889   62386 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.39.87 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-078196 NodeName:newest-cni-078196 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.87"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.87 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1123 08:57:52.155043   62386 kubeadm.go:196] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.39.87
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "newest-cni-078196"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.39.87"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.39.87"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.1
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.42.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.42.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1123 08:57:52.155124   62386 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
	I1123 08:57:52.170649   62386 binaries.go:51] Found k8s binaries, skipping transfer
	I1123 08:57:52.170739   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1123 08:57:52.186437   62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
	I1123 08:57:52.209956   62386 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1123 08:57:52.238732   62386 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2219 bytes)
	I1123 08:57:52.263556   62386 ssh_runner.go:195] Run: grep 192.168.39.87	control-plane.minikube.internal$ /etc/hosts
	I1123 08:57:52.269016   62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.39.87	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:57:52.291438   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:52.468471   62386 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:57:52.523082   62386 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196 for IP: 192.168.39.87
	I1123 08:57:52.523106   62386 certs.go:195] generating shared ca certs ...
	I1123 08:57:52.523125   62386 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.523320   62386 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
	I1123 08:57:52.523383   62386 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
	I1123 08:57:52.523392   62386 certs.go:257] generating profile certs ...
	I1123 08:57:52.523458   62386 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key
	I1123 08:57:52.523471   62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt with IP's: []
	I1123 08:57:52.657113   62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt ...
	I1123 08:57:52.657156   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt: {Name:mkd4a2297a388c5353f24d63692a9eca2de3895a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.657425   62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key ...
	I1123 08:57:52.657447   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key: {Name:mk97d3b4437d9c086044675cf55d01816d40a112 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.657646   62386 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4
	I1123 08:57:52.657673   62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.87]
	I1123 08:57:52.753683   62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 ...
	I1123 08:57:52.753714   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4: {Name:mkbf555d613a4fba5c26a5d85e984e69fa19d66f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.753910   62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 ...
	I1123 08:57:52.753929   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4: {Name:mk86a1d3d78eb2290d7da0f96ec23ec9d83a7382 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.754031   62386 certs.go:382] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt
	I1123 08:57:52.754133   62386 certs.go:386] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key
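The apiserver certificate generated above carries the SANs [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.87]; 10.96.0.1 is the first usable address of the configured ServiceCIDR 10.96.0.0/12, i.e. the in-cluster address of the kubernetes.default service, and 192.168.39.87 is the node IP. A short sketch of deriving that service address from the CIDR (helper name invented for illustration):

package main

import (
	"fmt"
	"net"
)

// firstServiceIP returns the first usable address inside the service CIDR
// (network address + 1), which is the conventional ClusterIP of the
// kubernetes.default service. Incrementing only the last byte is good
// enough for this illustration.
func firstServiceIP(cidr string) (net.IP, error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	ip := ipnet.IP.To4()
	out := make(net.IP, len(ip))
	copy(out, ip)
	out[len(out)-1]++
	return out, nil
}

func main() {
	ip, _ := firstServiceIP("10.96.0.0/12")
	fmt.Println(ip) // 10.96.0.1, matching the certificate SAN above
}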
	I1123 08:57:52.754190   62386 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key
	I1123 08:57:52.754206   62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt with IP's: []
	I1123 08:57:52.860620   62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt ...
	I1123 08:57:52.860647   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt: {Name:mk8319204c666212061b0efe79d3f0da238ee7e7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.860851   62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key ...
	I1123 08:57:52.860877   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key: {Name:mk66bf3abe86bc12c3af12e371d390dfcbb94d6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.861117   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
	W1123 08:57:52.861164   62386 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
	I1123 08:57:52.861180   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
	I1123 08:57:52.861225   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
	I1123 08:57:52.861277   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
	I1123 08:57:52.861316   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
	I1123 08:57:52.861376   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:52.861976   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1123 08:57:52.899377   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I1123 08:57:52.931761   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1123 08:57:52.966281   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1123 08:57:53.007390   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
	I1123 08:57:53.044942   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I1123 08:57:53.087195   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1123 08:57:53.132412   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I1123 08:57:53.183547   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1123 08:57:53.239854   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
	I1123 08:57:53.286333   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
	I1123 08:57:53.334114   62386 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1123 08:57:53.368550   62386 ssh_runner.go:195] Run: openssl version
	I1123 08:57:53.379200   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
	I1123 08:57:53.402310   62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
	I1123 08:57:53.409135   62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
	I1123 08:57:53.409206   62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
	I1123 08:57:53.420776   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
	I1123 08:57:53.439668   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1123 08:57:53.455152   62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:53.463920   62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:53.463999   62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:53.476317   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1123 08:57:53.500779   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
	I1123 08:57:53.518199   62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
	I1123 08:57:53.524305   62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
	I1123 08:57:53.524381   62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
	I1123 08:57:53.535728   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
	I1123 08:57:53.552096   62386 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I1123 08:57:53.560216   62386 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
	I1123 08:57:53.560306   62386 kubeadm.go:401] StartCluster: {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:53.560470   62386 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I1123 08:57:53.580412   62386 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1123 08:57:53.596570   62386 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1123 08:57:53.611293   62386 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1123 08:57:53.630652   62386 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I1123 08:57:53.630673   62386 kubeadm.go:158] found existing configuration files:
	
	I1123 08:57:53.630721   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I1123 08:57:53.648350   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I1123 08:57:53.648419   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I1123 08:57:53.668086   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I1123 08:57:53.682346   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I1123 08:57:53.682427   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I1123 08:57:53.696036   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I1123 08:57:53.708650   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I1123 08:57:53.708729   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I1123 08:57:53.721869   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I1123 08:57:53.733930   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I1123 08:57:53.734006   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I1123 08:57:53.747563   62386 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
	I1123 08:57:53.803699   62386 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
	I1123 08:57:53.803788   62386 kubeadm.go:319] [preflight] Running pre-flight checks
	I1123 08:57:53.933708   62386 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
	I1123 08:57:53.933907   62386 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I1123 08:57:53.934039   62386 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
	I1123 08:57:53.957595   62386 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I1123 08:57:53.960282   62386 out.go:252]   - Generating certificates and keys ...
	I1123 08:57:53.960381   62386 kubeadm.go:319] [certs] Using existing ca certificate authority
	I1123 08:57:53.960461   62386 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
	I1123 08:57:53.226464   62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
	I1123 08:57:54.308839   62386 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
	I1123 08:57:54.462473   62386 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
	I1123 08:57:54.656673   62386 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
	I1123 08:57:55.051656   62386 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
	I1123 08:57:55.893313   62386 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
	I1123 08:57:55.893649   62386 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
	I1123 08:57:56.010218   62386 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
	I1123 08:57:56.010458   62386 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
	I1123 08:57:56.117087   62386 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
	I1123 08:57:56.436611   62386 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
	I1123 08:57:56.745597   62386 kubeadm.go:319] [certs] Generating "sa" key and public key
	I1123 08:57:56.745835   62386 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I1123 08:57:56.988789   62386 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
	I1123 08:57:57.476516   62386 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
	I1123 08:57:57.662890   62386 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I1123 08:57:58.001771   62386 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I1123 08:57:58.199479   62386 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I1123 08:57:58.201506   62386 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I1123 08:57:58.204309   62386 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I1123 08:57:58.206280   62386 out.go:252]   - Booting up control plane ...
	I1123 08:57:58.206413   62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I1123 08:57:58.206524   62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I1123 08:57:58.206622   62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I1123 08:57:58.225366   62386 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I1123 08:57:58.225656   62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
	I1123 08:57:58.233945   62386 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
	I1123 08:57:58.234118   62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I1123 08:57:58.234179   62386 kubeadm.go:319] [kubelet-start] Starting the kubelet
	I1123 08:57:58.435406   62386 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
	I1123 08:57:58.435734   62386 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
	I1123 08:57:57.259625   62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: connection refused
	I1123 08:58:00.375540   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:58:00.379895   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.380474   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.380511   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.380795   62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
	I1123 08:58:00.381087   62480 machine.go:94] provisionDockerMachine start ...
	I1123 08:58:00.384347   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.384859   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.384898   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.385108   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:00.385436   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:00.385456   62480 main.go:143] libmachine: About to run SSH command:
	hostname
	I1123 08:58:00.505124   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
	
	I1123 08:58:00.505170   62480 buildroot.go:166] provisioning hostname "default-k8s-diff-port-925051"
	I1123 08:58:00.509221   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.509702   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.509735   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.509925   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:00.510144   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:00.510161   62480 main.go:143] libmachine: About to run SSH command:
	sudo hostname default-k8s-diff-port-925051 && echo "default-k8s-diff-port-925051" | sudo tee /etc/hostname
	I1123 08:58:00.644600   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-925051
	
	I1123 08:58:00.648066   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.648604   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.648630   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.648845   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:00.649045   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:00.649060   62480 main.go:143] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sdefault-k8s-diff-port-925051' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-925051/g' /etc/hosts;
				else 
					echo '127.0.1.1 default-k8s-diff-port-925051' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1123 08:58:00.768996   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:58:00.769030   62480 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
	I1123 08:58:00.769067   62480 buildroot.go:174] setting up certificates
	I1123 08:58:00.769088   62480 provision.go:84] configureAuth start
	I1123 08:58:00.772355   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.772869   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.772909   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.775615   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.776035   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.776086   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.776228   62480 provision.go:143] copyHostCerts
	I1123 08:58:00.776306   62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
	I1123 08:58:00.776319   62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
	I1123 08:58:00.776391   62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
	I1123 08:58:00.776518   62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
	I1123 08:58:00.776529   62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
	I1123 08:58:00.776558   62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
	I1123 08:58:00.776642   62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
	I1123 08:58:00.776653   62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
	I1123 08:58:00.776678   62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
	I1123 08:58:00.776751   62480 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-925051 san=[127.0.0.1 192.168.83.137 default-k8s-diff-port-925051 localhost minikube]
	I1123 08:58:00.949651   62480 provision.go:177] copyRemoteCerts
	I1123 08:58:00.949711   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1123 08:58:00.952558   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.952960   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.952982   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.953136   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:01.044089   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1123 08:58:01.077898   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I1123 08:58:01.115919   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
	I1123 08:58:01.157254   62480 provision.go:87] duration metric: took 388.131412ms to configureAuth
	I1123 08:58:01.157285   62480 buildroot.go:189] setting minikube options for container-runtime
	I1123 08:58:01.157510   62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:58:01.160663   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.161248   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:01.161295   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.161496   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:01.161777   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:01.161792   62480 main.go:143] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I1123 08:58:01.278322   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
	
	I1123 08:58:01.278347   62480 buildroot.go:70] root file system type: tmpfs
	I1123 08:58:01.278524   62480 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I1123 08:58:01.281592   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.282050   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:01.282098   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.282395   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:01.282601   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:01.282650   62480 main.go:143] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I1123 08:58:01.426254   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I1123 08:58:01.429123   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.429531   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:01.429561   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.429727   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:01.429945   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:01.429968   62480 main.go:143] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I1123 08:57:59.438296   62386 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.003129845s
	I1123 08:57:59.442059   62386 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
	I1123 08:57:59.442209   62386 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.39.87:8443/livez
	I1123 08:57:59.442348   62386 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
	I1123 08:57:59.442479   62386 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
	I1123 08:58:01.938904   62386 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 2.497307336s
	I1123 08:58:03.405770   62386 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 3.965160338s
	I1123 08:58:05.442827   62386 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 6.002687393s
	I1123 08:58:05.466318   62386 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I1123 08:58:05.495033   62386 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I1123 08:58:05.522725   62386 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
	I1123 08:58:05.523012   62386 kubeadm.go:319] [mark-control-plane] Marking the node newest-cni-078196 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I1123 08:58:05.543260   62386 kubeadm.go:319] [bootstrap-token] Using token: dgrodg.6ciokz1biodl2yci
	I1123 08:58:02.622394   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
	Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
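	The symlink message above confirms that the docker.service unit generated earlier in this log was installed and enabled on the guest. A minimal sketch, assuming SSH access to this profile's VM and standard systemd tooling, of how the installed unit and its effective ExecStart could be double-checked (profile name taken from this log):
	    # Hypothetical verification commands; not part of the captured test run.
	    minikube ssh -p default-k8s-diff-port-925051 -- sudo systemctl cat docker.service
	    minikube ssh -p default-k8s-diff-port-925051 -- sudo systemctl show docker --property=ExecStart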
	
	I1123 08:58:02.622428   62480 machine.go:97] duration metric: took 2.24132298s to provisionDockerMachine
	I1123 08:58:02.622443   62480 start.go:293] postStartSetup for "default-k8s-diff-port-925051" (driver="kvm2")
	I1123 08:58:02.622457   62480 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1123 08:58:02.622522   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1123 08:58:02.625753   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.626334   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.626374   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.626567   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:02.732392   62480 ssh_runner.go:195] Run: cat /etc/os-release
	I1123 08:58:02.737975   62480 info.go:137] Remote host: Buildroot 2025.02
	I1123 08:58:02.738010   62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
	I1123 08:58:02.738111   62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
	I1123 08:58:02.738225   62480 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
	I1123 08:58:02.738341   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1123 08:58:02.755815   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:58:02.790325   62480 start.go:296] duration metric: took 167.864143ms for postStartSetup
	I1123 08:58:02.790381   62480 fix.go:56] duration metric: took 20.323185295s for fixHost
	I1123 08:58:02.793471   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.793912   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.793950   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.794223   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:02.794447   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:02.794458   62480 main.go:143] libmachine: About to run SSH command:
	date +%s.%N
	I1123 08:58:02.907310   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888282.872914256
	
	I1123 08:58:02.907338   62480 fix.go:216] guest clock: 1763888282.872914256
	I1123 08:58:02.907348   62480 fix.go:229] Guest: 2025-11-23 08:58:02.872914256 +0000 UTC Remote: 2025-11-23 08:58:02.790385341 +0000 UTC m=+45.999028572 (delta=82.528915ms)
	I1123 08:58:02.907369   62480 fix.go:200] guest clock delta is within tolerance: 82.528915ms
	I1123 08:58:02.907375   62480 start.go:83] releasing machines lock for "default-k8s-diff-port-925051", held for 20.440202624s
	I1123 08:58:02.910604   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.911104   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.911130   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.911758   62480 ssh_runner.go:195] Run: cat /version.json
	I1123 08:58:02.911816   62480 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1123 08:58:02.915121   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915430   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915677   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.915710   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915907   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.915942   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915932   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:02.916129   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:03.020815   62480 ssh_runner.go:195] Run: systemctl --version
	I1123 08:58:03.028066   62480 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	W1123 08:58:03.036089   62480 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
	I1123 08:58:03.036168   62480 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1123 08:58:03.059461   62480 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I1123 08:58:03.059497   62480 start.go:496] detecting cgroup driver to use...
	I1123 08:58:03.059639   62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:58:03.085945   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I1123 08:58:03.100188   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1123 08:58:03.114121   62480 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I1123 08:58:03.114197   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1123 08:58:03.128502   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:58:03.141941   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1123 08:58:03.155742   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:58:03.170251   62480 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1123 08:58:03.185473   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1123 08:58:03.199212   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I1123 08:58:03.212441   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I1123 08:58:03.225457   62480 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1123 08:58:03.237735   62480 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
	stdout:
	
	stderr:
	sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
	I1123 08:58:03.237807   62480 ssh_runner.go:195] Run: sudo modprobe br_netfilter
	I1123 08:58:03.251616   62480 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1123 08:58:03.264293   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:03.431052   62480 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1123 08:58:03.484769   62480 start.go:496] detecting cgroup driver to use...
	I1123 08:58:03.484887   62480 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I1123 08:58:03.515067   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:58:03.538674   62480 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I1123 08:58:03.566269   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:58:03.585483   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:58:03.603778   62480 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1123 08:58:03.640497   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:58:03.659085   62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:58:03.687162   62480 ssh_runner.go:195] Run: which cri-dockerd
	I1123 08:58:03.694175   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I1123 08:58:03.712519   62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I1123 08:58:03.741521   62480 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I1123 08:58:03.916394   62480 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I1123 08:58:04.069031   62480 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I1123 08:58:04.069190   62480 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I1123 08:58:04.093301   62480 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:58:04.109417   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:04.272454   62480 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:58:04.931701   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1123 08:58:04.948944   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I1123 08:58:04.971544   62480 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
	I1123 08:58:05.005474   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:58:05.031097   62480 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I1123 08:58:05.200507   62480 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I1123 08:58:05.394816   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:05.619873   62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I1123 08:58:05.666855   62480 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:58:05.685142   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:05.848671   62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:58:05.996045   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:58:06.018056   62480 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I1123 08:58:06.018168   62480 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I1123 08:58:06.026546   62480 start.go:564] Will wait 60s for crictl version
	I1123 08:58:06.026630   62480 ssh_runner.go:195] Run: which crictl
	I1123 08:58:06.032819   62480 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1123 08:58:06.084168   62480 start.go:580] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.5.1
	RuntimeApiVersion:  v1
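	With /etc/crictl.yaml pointed at the cri-dockerd socket (written a few lines earlier), the same runtime information can be queried by hand. A small sketch, assuming a shell on the node; the endpoint flag is redundant with the config file and shown only for clarity:
	    sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version
	    sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock info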
	I1123 08:58:06.084266   62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:58:06.126882   62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:58:06.163943   62480 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
	I1123 08:58:06.168664   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:06.169284   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:06.169324   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:06.169553   62480 ssh_runner.go:195] Run: grep 192.168.83.1	host.minikube.internal$ /etc/hosts
	I1123 08:58:06.176801   62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.83.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:58:06.201834   62480 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I1123 08:58:06.201979   62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:58:06.202051   62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:58:06.228393   62480 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:58:06.228418   62480 docker.go:621] Images already preloaded, skipping extraction
	I1123 08:58:06.228478   62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:58:06.253832   62480 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:58:06.253872   62480 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:58:06.253886   62480 kubeadm.go:935] updating node { 192.168.83.137 8444 v1.34.1 docker true true} ...
	I1123 08:58:06.254046   62480 kubeadm.go:947] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-925051 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.83.137
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I1123 08:58:06.254117   62480 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I1123 08:58:06.333361   62480 cni.go:84] Creating CNI manager for ""
	I1123 08:58:06.333408   62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:58:06.333432   62480 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
	I1123 08:58:06.333457   62480 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.83.137 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-925051 NodeName:default-k8s-diff-port-925051 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.83.137"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.83.137 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1123 08:58:06.333702   62480 kubeadm.go:196] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.83.137
	  bindPort: 8444
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "default-k8s-diff-port-925051"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.83.137"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.83.137"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8444
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.1
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1123 08:58:06.333784   62480 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
	I1123 08:58:06.356565   62480 binaries.go:51] Found k8s binaries, skipping transfer
	I1123 08:58:06.356666   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1123 08:58:06.376736   62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (329 bytes)
	I1123 08:58:06.412797   62480 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1123 08:58:06.447785   62480 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2235 bytes)
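	The 2235-byte payload copied above is the combined kubeadm config printed earlier in this log (InitConfiguration, ClusterConfiguration, KubeletConfiguration and KubeProxyConfiguration in one file), staged as kubeadm.yaml.new before minikube promotes it to /var/tmp/minikube/kubeadm.yaml. A minimal sketch, assuming a shell on the node and the kubeadm binary minikube stages under /var/lib/minikube/binaries (the validate subcommand needs kubeadm v1.26+), of how such a file could be sanity-checked:
	    # Hypothetical check; paths and version taken from this log.
	    sudo /var/lib/minikube/binaries/v1.34.1/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml
	    # Print upstream defaults for comparison with the generated values:
	    sudo /var/lib/minikube/binaries/v1.34.1/kubeadm config print init-defaults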
	I1123 08:58:06.486793   62480 ssh_runner.go:195] Run: grep 192.168.83.137	control-plane.minikube.internal$ /etc/hosts
	I1123 08:58:06.494943   62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.83.137	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:58:06.522673   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:06.760714   62480 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:58:06.816865   62480 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051 for IP: 192.168.83.137
	I1123 08:58:06.817014   62480 certs.go:195] generating shared ca certs ...
	I1123 08:58:06.817069   62480 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:58:06.817298   62480 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
	I1123 08:58:06.817470   62480 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
	I1123 08:58:06.817524   62480 certs.go:257] generating profile certs ...
	I1123 08:58:06.817689   62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/client.key
	I1123 08:58:06.817768   62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/apiserver.key.3e63079d
	I1123 08:58:06.817847   62480 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/proxy-client.key
	I1123 08:58:06.818039   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
	W1123 08:58:06.818089   62480 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
	I1123 08:58:06.818100   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
	I1123 08:58:06.818136   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
	I1123 08:58:06.818179   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
	I1123 08:58:06.818209   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
	I1123 08:58:06.818301   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:58:06.819187   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1123 08:58:05.545959   62386 out.go:252]   - Configuring RBAC rules ...
	I1123 08:58:05.546132   62386 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I1123 08:58:05.554804   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I1123 08:58:05.569723   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I1123 08:58:05.574634   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I1123 08:58:05.579213   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I1123 08:58:05.585176   62386 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I1123 08:58:05.855390   62386 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I1123 08:58:06.305498   62386 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
	I1123 08:58:06.860572   62386 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
	I1123 08:58:06.862132   62386 kubeadm.go:319] 
	I1123 08:58:06.862300   62386 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
	I1123 08:58:06.862315   62386 kubeadm.go:319] 
	I1123 08:58:06.862459   62386 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
	I1123 08:58:06.862488   62386 kubeadm.go:319] 
	I1123 08:58:06.862544   62386 kubeadm.go:319]   mkdir -p $HOME/.kube
	I1123 08:58:06.862628   62386 kubeadm.go:319]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I1123 08:58:06.862700   62386 kubeadm.go:319]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I1123 08:58:06.862710   62386 kubeadm.go:319] 
	I1123 08:58:06.862788   62386 kubeadm.go:319] Alternatively, if you are the root user, you can run:
	I1123 08:58:06.862797   62386 kubeadm.go:319] 
	I1123 08:58:06.862866   62386 kubeadm.go:319]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I1123 08:58:06.862875   62386 kubeadm.go:319] 
	I1123 08:58:06.862984   62386 kubeadm.go:319] You should now deploy a pod network to the cluster.
	I1123 08:58:06.863098   62386 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I1123 08:58:06.863220   62386 kubeadm.go:319]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I1123 08:58:06.863243   62386 kubeadm.go:319] 
	I1123 08:58:06.863353   62386 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
	I1123 08:58:06.863463   62386 kubeadm.go:319] and service account keys on each node and then running the following as root:
	I1123 08:58:06.863473   62386 kubeadm.go:319] 
	I1123 08:58:06.863589   62386 kubeadm.go:319]   kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
	I1123 08:58:06.863736   62386 kubeadm.go:319] 	--discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a \
	I1123 08:58:06.863769   62386 kubeadm.go:319] 	--control-plane 
	I1123 08:58:06.863778   62386 kubeadm.go:319] 
	I1123 08:58:06.863904   62386 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
	I1123 08:58:06.863913   62386 kubeadm.go:319] 
	I1123 08:58:06.864056   62386 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
	I1123 08:58:06.864229   62386 kubeadm.go:319] 	--discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a 
	I1123 08:58:06.865336   62386 kubeadm.go:319] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I1123 08:58:06.865367   62386 cni.go:84] Creating CNI manager for ""
	I1123 08:58:06.865396   62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:58:06.867294   62386 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
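	For reference, the --discovery-token-ca-cert-hash value in the kubeadm join commands printed above is the SHA-256 digest of the cluster CA certificate's DER-encoded Subject Public Key Info. A minimal Go sketch that recomputes it, assuming the ca.crt location that minikube scp's into the guest earlier in this log:
	
	package main
	
	import (
		"crypto/sha256"
		"crypto/x509"
		"encoding/pem"
		"fmt"
		"os"
	)
	
	func main() {
		// Assumed location: the log above copies ca.crt to /var/lib/minikube/certs/ca.crt.
		data, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
		if err != nil {
			panic(err)
		}
		block, _ := pem.Decode(data)
		if block == nil {
			panic("no PEM block in ca.crt")
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			panic(err)
		}
		// kubeadm pins the CA by hashing the DER-encoded Subject Public Key Info.
		sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
		fmt.Printf("sha256:%x\n", sum)
	}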
	
	
	==> Docker <==
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.403847294Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530278754Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530380987Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Nov 23 08:57:20 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:20Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541222738Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541313635Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544639412Z" level=error msg="unexpected HTTP error handling" error="<nil>"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544665809Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.895558802Z" level=info msg="ignoring event" container=5858e2fd1e0f544e020a845d1e9aa15e86c2117c0ebff9dfe1b6f4d96f844434 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Nov 23 08:57:21 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/f70de02d77443d2041cfe03c25cb36b6f758dd4e678353419ea55ac106e8b68a/resolv.conf as [nameserver 10.96.0.10 search kubernetes-dashboard.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
	Nov 23 08:57:32 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:32.990740143Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076597693Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076828182Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Nov 23 08:57:33 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:33Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
	Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.550212350Z" level=info msg="ignoring event" container=1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
	Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-lp6jk_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"9a49ebff42d5eef5c3e23db2e1ab337396080dea6c13220062ba5e0e48a95cc8\""
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.760065184Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863488316Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863610785Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Nov 23 08:58:08 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:08Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.897944813Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.899313923Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914470304Z" level=error msg="unexpected HTTP error handling" error="<nil>"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914503647Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                 CREATED              STATE               NAME                      ATTEMPT             POD ID              POD                                         NAMESPACE
	371de4a468901       6e38f40d628db                                                                                         1 second ago         Running             storage-provisioner       2                   a97e7e7100c3a       storage-provisioner                         kube-system
	57ebcdb97431d       kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93        49 seconds ago       Running             kubernetes-dashboard      0                   644b3c0a17fe8       kubernetes-dashboard-855c9754f9-zh9mv       kubernetes-dashboard
	58768e42678e9       56cc512116c8f                                                                                         51 seconds ago       Running             busybox                   1                   c39a5f42630b0       busybox                                     default
	f7e183883855c       52546a367cc9e                                                                                         51 seconds ago       Running             coredns                   1                   86281d14c8f1e       coredns-66bc5c9577-nj6pk                    kube-system
	1f0a2f0aefa9b       6e38f40d628db                                                                                         About a minute ago   Exited              storage-provisioner       1                   a97e7e7100c3a       storage-provisioner                         kube-system
	8c0537e27a6fb       fc25172553d79                                                                                         About a minute ago   Running             kube-proxy                1                   dd983c999b8f4       kube-proxy-wlb9w                            kube-system
	8deb34aee6ea1       5f1f5298c888d                                                                                         About a minute ago   Running             etcd                      1                   ccce046e98c9b       etcd-no-preload-019660                      kube-system
	1a4750ff7e8cb       c80c8dbafe7dd                                                                                         About a minute ago   Running             kube-controller-manager   1                   e18e6fb700516       kube-controller-manager-no-preload-019660   kube-system
	6929fc4394d1d       c3994bc696102                                                                                         About a minute ago   Running             kube-apiserver            1                   b493d9303993d       kube-apiserver-no-preload-019660            kube-system
	266be5a40ca65       7dd6aaa1717ab                                                                                         About a minute ago   Running             kube-scheduler            1                   a1f3f18719102       kube-scheduler-no-preload-019660            kube-system
	7e459e5ac3043       gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e   2 minutes ago        Exited              busybox                   0                   c0e79a536f316       busybox                                     default
	b5d2ec6064039       52546a367cc9e                                                                                         2 minutes ago        Exited              coredns                   0                   92a72987832f3       coredns-66bc5c9577-nj6pk                    kube-system
	4aea324009fdd       fc25172553d79                                                                                         2 minutes ago        Exited              kube-proxy                0                   adcf7215f30c5       kube-proxy-wlb9w                            kube-system
	57bb06d26ab69       7dd6aaa1717ab                                                                                         2 minutes ago        Exited              kube-scheduler            0                   0e3f3ba5c2b8c       kube-scheduler-no-preload-019660            kube-system
	78433f5a1dee5       5f1f5298c888d                                                                                         2 minutes ago        Exited              etcd                      0                   c90dfb42b9b72       etcd-no-preload-019660                      kube-system
	e0963762dabe6       c80c8dbafe7dd                                                                                         2 minutes ago        Exited              kube-controller-manager   0                   796e38a439eca       kube-controller-manager-no-preload-019660   kube-system
	51985d9c2b5e4       c3994bc696102                                                                                         2 minutes ago        Exited              kube-apiserver            0                   8ec1927039422       kube-apiserver-no-preload-019660            kube-system
	
	
	==> coredns [b5d2ec606403] <==
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
	CoreDNS-1.12.1
	linux/amd64, go1.24.1, 707c7c1
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	[ERROR] plugin/kubernetes: Unhandled Error
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	[ERROR] plugin/kubernetes: Unhandled Error
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	[ERROR] plugin/kubernetes: Unhandled Error
	[INFO] Reloading
	[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
	[INFO] Reloading complete
	[INFO] 127.0.0.1:42110 - 29445 "HINFO IN 9017480915883545082.4400091200596631812. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.103448715s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
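	The 'plugin/ready: Still waiting on: "kubernetes"' lines come from CoreDNS's ready plugin, which serves /ready on port 8181 and only returns 200 once every enabled plugin (including kubernetes) has signalled readiness. A minimal Go sketch of that probe, with a placeholder pod IP rather than one taken from this run:
	
	package main
	
	import (
		"fmt"
		"io"
		"net/http"
		"time"
	)
	
	func main() {
		// Placeholder address: substitute the CoreDNS pod IP from the cluster.
		url := "http://10.244.0.2:8181/ready"
		client := &http.Client{Timeout: 2 * time.Second}
		resp, err := client.Get(url)
		if err != nil {
			fmt.Println("probe failed:", err)
			return
		}
		defer resp.Body.Close()
		body, _ := io.ReadAll(resp.Body)
		// 200 OK means all plugins (kubernetes included) reported ready;
		// anything else corresponds to the "Still waiting on" log lines above.
		fmt.Println(resp.Status, string(body))
	}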
	
	
	==> coredns [f7e183883855] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
	CoreDNS-1.12.1
	linux/amd64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:55083 - 4317 "HINFO IN 4704850718228764652.4547352497864188913. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.118220473s
	
	
	==> describe nodes <==
	Name:               no-preload-019660
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=amd64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=amd64
	                    kubernetes.io/hostname=no-preload-019660
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e
	                    minikube.k8s.io/name=no-preload-019660
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2025_11_23T08_55_22_0700
	                    minikube.k8s.io/version=v1.37.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	Annotations:        node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Sun, 23 Nov 2025 08:55:18 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  no-preload-019660
	  AcquireTime:     <unset>
	  RenewTime:       Sun, 23 Nov 2025 08:58:07 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:57:11 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.50.40
	  Hostname:    no-preload-019660
	Capacity:
	  cpu:                2
	  ephemeral-storage:  17734596Ki
	  hugepages-2Mi:      0
	  memory:             3035908Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  17734596Ki
	  hugepages-2Mi:      0
	  memory:             3035908Ki
	  pods:               110
	System Info:
	  Machine ID:                 5db77235f15f4a52ad7c561433b2bbe5
	  System UUID:                5db77235-f15f-4a52-ad7c-561433b2bbe5
	  Boot ID:                    7c4938cf-e087-4d48-94a0-7660c53890e7
	  Kernel Version:             6.6.95
	  OS Image:                   Buildroot 2025.02
	  Operating System:           linux
	  Architecture:               amd64
	  Container Runtime Version:  docker://28.5.1
	  Kubelet Version:            v1.34.1
	  Kube-Proxy Version:         
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (11 in total)
	  Namespace                   Name                                          CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                          ------------  ----------  ---------------  -------------  ---
	  default                     busybox                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m4s
	  kube-system                 coredns-66bc5c9577-nj6pk                      100m (5%)     0 (0%)      70Mi (2%)        170Mi (5%)     2m41s
	  kube-system                 etcd-no-preload-019660                        100m (5%)     0 (0%)      100Mi (3%)       0 (0%)         2m47s
	  kube-system                 kube-apiserver-no-preload-019660              250m (12%)    0 (0%)      0 (0%)           0 (0%)         2m47s
	  kube-system                 kube-controller-manager-no-preload-019660     200m (10%)    0 (0%)      0 (0%)           0 (0%)         2m47s
	  kube-system                 kube-proxy-wlb9w                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m42s
	  kube-system                 kube-scheduler-no-preload-019660              100m (5%)     0 (0%)      0 (0%)           0 (0%)         2m47s
	  kube-system                 metrics-server-746fcd58dc-tg8q5               100m (5%)     0 (0%)      200Mi (6%)       0 (0%)         114s
	  kube-system                 storage-provisioner                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m39s
	  kubernetes-dashboard        dashboard-metrics-scraper-6ffb444bf9-4965t    0 (0%)        0 (0%)      0 (0%)           0 (0%)         62s
	  kubernetes-dashboard        kubernetes-dashboard-855c9754f9-zh9mv         0 (0%)        0 (0%)      0 (0%)           0 (0%)         62s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests     Limits
	  --------           --------     ------
	  cpu                850m (42%)   0 (0%)
	  memory             370Mi (12%)  170Mi (5%)
	  ephemeral-storage  0 (0%)       0 (0%)
	  hugepages-2Mi      0 (0%)       0 (0%)
	Events:
	  Type     Reason                   Age                    From             Message
	  ----     ------                   ----                   ----             -------
	  Normal   Starting                 2m39s                  kube-proxy       
	  Normal   Starting                 65s                    kube-proxy       
	  Normal   NodeHasSufficientMemory  2m56s (x8 over 2m56s)  kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    2m56s (x8 over 2m56s)  kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     2m56s (x7 over 2m56s)  kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  2m56s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   Starting                 2m47s                  kubelet          Starting kubelet.
	  Normal   NodeAllocatableEnforced  2m47s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasSufficientMemory  2m47s                  kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    2m47s                  kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     2m47s                  kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	  Normal   NodeReady                2m43s                  kubelet          Node no-preload-019660 status is now: NodeReady
	  Normal   RegisteredNode           2m42s                  node-controller  Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
	  Normal   Starting                 74s                    kubelet          Starting kubelet.
	  Normal   NodeAllocatableEnforced  74s                    kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasNoDiskPressure    73s (x8 over 74s)      kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     73s (x7 over 74s)      kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	  Normal   NodeHasSufficientMemory  73s (x8 over 74s)      kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Warning  Rebooted                 68s                    kubelet          Node no-preload-019660 has been rebooted, boot id: 7c4938cf-e087-4d48-94a0-7660c53890e7
	  Normal   RegisteredNode           65s                    node-controller  Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
	  Normal   Starting                 2s                     kubelet          Starting kubelet.
	  Normal   NodeAllocatableEnforced  2s                     kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasSufficientMemory  2s                     kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    2s                     kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     2s                     kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
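	The node conditions and events listed above can also be read programmatically; a small client-go sketch (the kubeconfig path is a placeholder, not taken from the test run):
	
	package main
	
	import (
		"context"
		"fmt"
	
		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)
	
	func main() {
		// Placeholder kubeconfig path; the test harness writes one per profile.
		cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
		if err != nil {
			panic(err)
		}
		cs, err := kubernetes.NewForConfig(cfg)
		if err != nil {
			panic(err)
		}
		node, err := cs.CoreV1().Nodes().Get(context.Background(), "no-preload-019660", metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		// Mirror the "Conditions:" table: type, status, reason for each condition.
		for _, c := range node.Status.Conditions {
			ready := ""
			if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
				ready = " <- node is Ready"
			}
			fmt.Printf("%-16s %-6s %s%s\n", c.Type, c.Status, c.Reason, ready)
		}
	}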
	
	
	==> dmesg <==
	[Nov23 08:56] Booted with the nomodeset parameter. Only the system framebuffer will be available
	[  +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
	[  +0.001555] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
	[  +0.004890] (rpcbind)[121]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
	[  +0.922269] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
	[  +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
	[  +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
	[  +0.557715] kauditd_printk_skb: 29 callbacks suppressed
	[  +0.102404] kauditd_printk_skb: 421 callbacks suppressed
	[Nov23 08:57] kauditd_printk_skb: 165 callbacks suppressed
	[  +4.416704] kauditd_printk_skb: 134 callbacks suppressed
	[  +0.028951] kauditd_printk_skb: 144 callbacks suppressed
	[  +1.212600] kauditd_printk_skb: 93 callbacks suppressed
	[  +0.188677] kauditd_printk_skb: 78 callbacks suppressed
	[Nov23 08:58] kauditd_printk_skb: 35 callbacks suppressed
	
	
	==> etcd [78433f5a1dee] <==
	{"level":"info","ts":"2025-11-23T08:55:27.960210Z","caller":"traceutil/trace.go:172","msg":"trace[1913795349] transaction","detail":"{read_only:false; response_revision:359; number_of_response:1; }","duration":"132.125474ms","start":"2025-11-23T08:55:27.828070Z","end":"2025-11-23T08:55:27.960197Z","steps":["trace[1913795349] 'process raft request'  (duration: 130.470237ms)"],"step_count":1}
	{"level":"warn","ts":"2025-11-23T08:55:27.961326Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"115.093447ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/service-cidrs-controller\" limit:1 ","response":"range_response_count:1 size:214"}
	{"level":"info","ts":"2025-11-23T08:55:27.961420Z","caller":"traceutil/trace.go:172","msg":"trace[1979015044] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/service-cidrs-controller; range_end:; response_count:1; response_revision:360; }","duration":"115.232691ms","start":"2025-11-23T08:55:27.846179Z","end":"2025-11-23T08:55:27.961412Z","steps":["trace[1979015044] 'agreement among raft nodes before linearized reading'  (duration: 114.979531ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:27.964671Z","caller":"traceutil/trace.go:172","msg":"trace[1629415560] transaction","detail":"{read_only:false; response_revision:361; number_of_response:1; }","duration":"113.511815ms","start":"2025-11-23T08:55:27.851149Z","end":"2025-11-23T08:55:27.964661Z","steps":["trace[1629415560] 'process raft request'  (duration: 111.933576ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:27.965851Z","caller":"traceutil/trace.go:172","msg":"trace[339398896] transaction","detail":"{read_only:false; response_revision:362; number_of_response:1; }","duration":"103.77975ms","start":"2025-11-23T08:55:27.862061Z","end":"2025-11-23T08:55:27.965841Z","steps":["trace[339398896] 'process raft request'  (duration: 102.247209ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:52.232221Z","caller":"traceutil/trace.go:172","msg":"trace[991594023] transaction","detail":"{read_only:false; response_revision:463; number_of_response:1; }","duration":"138.295615ms","start":"2025-11-23T08:55:52.093898Z","end":"2025-11-23T08:55:52.232193Z","steps":["trace[991594023] 'process raft request'  (duration: 138.148011ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:53.110050Z","caller":"traceutil/trace.go:172","msg":"trace[1408655835] transaction","detail":"{read_only:false; response_revision:464; number_of_response:1; }","duration":"111.465311ms","start":"2025-11-23T08:55:52.998570Z","end":"2025-11-23T08:55:53.110036Z","steps":["trace[1408655835] 'process raft request'  (duration: 111.386468ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:56:16.343294Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
	{"level":"info","ts":"2025-11-23T08:56:16.343638Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
	{"level":"error","ts":"2025-11-23T08:56:16.344971Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-11-23T08:56:23.350843Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-11-23T08:56:23.350926Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-11-23T08:56:23.350948Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"113a167c41258c81","current-leader-member-id":"113a167c41258c81"}
	{"level":"info","ts":"2025-11-23T08:56:23.351067Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
	{"level":"info","ts":"2025-11-23T08:56:23.351076Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353233Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353335Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"error","ts":"2025-11-23T08:56:23.353344Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353381Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353419Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
	{"level":"error","ts":"2025-11-23T08:56:23.353428Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-11-23T08:56:23.359157Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.50.40:2380"}
	{"level":"error","ts":"2025-11-23T08:56:23.359253Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-11-23T08:56:23.359488Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.50.40:2380"}
	{"level":"info","ts":"2025-11-23T08:56:23.359540Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
	
	
	==> etcd [8deb34aee6ea] <==
	{"level":"warn","ts":"2025-11-23T08:57:00.099710Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44330","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.113877Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44336","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.136374Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44356","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.145346Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44368","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.154857Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44394","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.171909Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44414","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.185801Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44422","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.191640Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44442","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.202370Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44456","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.212078Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44464","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.224299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44490","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.239703Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44498","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.248343Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44522","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.259201Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44546","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.280884Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44576","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.303755Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44586","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.322303Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44610","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.379317Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44628","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-11-23T08:57:16.914297Z","caller":"traceutil/trace.go:172","msg":"trace[282693566] transaction","detail":"{read_only:false; response_revision:710; number_of_response:1; }","duration":"165.899912ms","start":"2025-11-23T08:57:16.748378Z","end":"2025-11-23T08:57:16.914278Z","steps":["trace[282693566] 'process raft request'  (duration: 165.731904ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:57:17.891916Z","caller":"traceutil/trace.go:172","msg":"trace[845827594] linearizableReadLoop","detail":"{readStateIndex:756; appliedIndex:756; }","duration":"162.635779ms","start":"2025-11-23T08:57:17.729260Z","end":"2025-11-23T08:57:17.891896Z","steps":["trace[845827594] 'read index received'  (duration: 162.630099ms)","trace[845827594] 'applied index is now lower than readState.Index'  (duration: 4.7µs)"],"step_count":2}
	{"level":"warn","ts":"2025-11-23T08:57:17.892195Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"162.847621ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
	{"level":"info","ts":"2025-11-23T08:57:17.892577Z","caller":"traceutil/trace.go:172","msg":"trace[1595377469] transaction","detail":"{read_only:false; response_revision:712; number_of_response:1; }","duration":"262.918033ms","start":"2025-11-23T08:57:17.629632Z","end":"2025-11-23T08:57:17.892550Z","steps":["trace[1595377469] 'process raft request'  (duration: 262.820051ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:57:17.892238Z","caller":"traceutil/trace.go:172","msg":"trace[1998076635] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:711; }","duration":"162.976ms","start":"2025-11-23T08:57:17.729254Z","end":"2025-11-23T08:57:17.892230Z","steps":["trace[1998076635] 'agreement among raft nodes before linearized reading'  (duration: 162.824778ms)"],"step_count":1}
	{"level":"warn","ts":"2025-11-23T08:57:17.894716Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"130.045976ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
	{"level":"info","ts":"2025-11-23T08:57:17.894762Z","caller":"traceutil/trace.go:172","msg":"trace[1496763416] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:712; }","duration":"130.105624ms","start":"2025-11-23T08:57:17.764650Z","end":"2025-11-23T08:57:17.894756Z","steps":["trace[1496763416] 'agreement among raft nodes before linearized reading'  (duration: 130.023549ms)"],"step_count":1}
	
	
	==> kernel <==
	 08:58:09 up 1 min,  0 users,  load average: 1.58, 0.55, 0.20
	Linux no-preload-019660 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Nov 19 01:10:03 UTC 2025 x86_64 GNU/Linux
	PRETTY_NAME="Buildroot 2025.02"
	
	
	==> kube-apiserver [51985d9c2b5e] <==
	W1123 08:56:25.707408       1 logging.go:55] [core] [Channel #135 SubChannel #137]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.731493       1 logging.go:55] [core] [Channel #63 SubChannel #65]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.801488       1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.835630       1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.837271       1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.885167       1 logging.go:55] [core] [Channel #47 SubChannel #49]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.919480       1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.953337       1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.992450       1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.001050       1 logging.go:55] [core] [Channel #175 SubChannel #177]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.027017       1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.043092       1 logging.go:55] [core] [Channel #159 SubChannel #161]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.075821       1 logging.go:55] [core] [Channel #83 SubChannel #85]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.087192       1 logging.go:55] [core] [Channel #67 SubChannel #69]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.108299       1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.143125       1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.143847       1 logging.go:55] [core] [Channel #27 SubChannel #29]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.168146       1 logging.go:55] [core] [Channel #31 SubChannel #33]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.194296       1 logging.go:55] [core] [Channel #55 SubChannel #57]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.217089       1 logging.go:55] [core] [Channel #143 SubChannel #145]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.284415       1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.304057       1 logging.go:55] [core] [Channel #127 SubChannel #129]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.351096       1 logging.go:55] [core] [Channel #151 SubChannel #153]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.358315       1 logging.go:55] [core] [Channel #107 SubChannel #109]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.398513       1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	
	
	==> kube-apiserver [6929fc4394d1] <==
	W1123 08:57:02.240589       1 handler_proxy.go:99] no RequestInfo found in the context
	E1123 08:57:02.241169       1 controller.go:102] "Unhandled Error" err=<
		loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
		, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	 > logger="UnhandledError"
	I1123 08:57:02.242304       1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I1123 08:57:03.447397       1 controller.go:667] quota admission added evaluator for: deployments.apps
	I1123 08:57:03.566737       1 controller.go:667] quota admission added evaluator for: daemonsets.apps
	I1123 08:57:03.633482       1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
	I1123 08:57:03.665173       1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	I1123 08:57:04.456742       1 controller.go:667] quota admission added evaluator for: endpoints
	I1123 08:57:04.822296       1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I1123 08:57:04.922886       1 controller.go:667] quota admission added evaluator for: replicasets.apps
	I1123 08:57:06.855489       1 controller.go:667] quota admission added evaluator for: namespaces
	I1123 08:57:07.352680       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.100.252.132"}
	I1123 08:57:07.386303       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.100.154.160"}
	W1123 08:58:06.568683       1 handler_proxy.go:99] no RequestInfo found in the context
	E1123 08:58:06.568889       1 controller.go:102] "Unhandled Error" err=<
		loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
		, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	 > logger="UnhandledError"
	I1123 08:58:06.569001       1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	W1123 08:58:06.583847       1 handler_proxy.go:99] no RequestInfo found in the context
	E1123 08:58:06.587393       1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
	I1123 08:58:06.587452       1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	
	
	==> kube-controller-manager [1a4750ff7e8c] <==
	I1123 08:57:04.478449       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I1123 08:57:04.488570       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	I1123 08:57:04.494373       1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
	I1123 08:57:04.481772       1 shared_informer.go:356] "Caches are synced" controller="PV protection"
	I1123 08:57:04.502443       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
	I1123 08:57:04.502540       1 shared_informer.go:356] "Caches are synced" controller="deployment"
	I1123 08:57:04.506670       1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
	I1123 08:57:04.510647       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I1123 08:57:04.566367       1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
	I1123 08:57:04.591835       1 shared_informer.go:349] "Waiting for caches to sync" controller="garbage collector"
	I1123 08:57:04.750206       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I1123 08:57:04.750262       1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
	I1123 08:57:04.750270       1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
	I1123 08:57:04.793332       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	E1123 08:57:07.066560       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.102507       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.134848       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.147364       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.152054       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.176406       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.177162       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.185205       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	I1123 08:57:14.479438       1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
	I1123 08:58:06.668391       1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
	E1123 08:58:06.670861       1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
	
	
	==> kube-controller-manager [e0963762dabe] <==
	I1123 08:55:27.305673       1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
	I1123 08:55:27.305856       1 shared_informer.go:356] "Caches are synced" controller="disruption"
	I1123 08:55:27.305946       1 shared_informer.go:356] "Caches are synced" controller="namespace"
	I1123 08:55:27.307430       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
	I1123 08:55:27.307491       1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
	I1123 08:55:27.307769       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
	I1123 08:55:27.308002       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I1123 08:55:27.311526       1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
	I1123 08:55:27.320061       1 shared_informer.go:356] "Caches are synced" controller="node"
	I1123 08:55:27.320143       1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
	I1123 08:55:27.320176       1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
	I1123 08:55:27.320181       1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
	I1123 08:55:27.320186       1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
	I1123 08:55:27.323691       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I1123 08:55:27.332119       1 shared_informer.go:356] "Caches are synced" controller="taint"
	I1123 08:55:27.332230       1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
	I1123 08:55:27.332307       1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="no-preload-019660"
	I1123 08:55:27.332344       1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
	I1123 08:55:27.353034       1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
	I1123 08:55:27.353188       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I1123 08:55:27.353234       1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
	I1123 08:55:27.353253       1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
	I1123 08:55:27.355630       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I1123 08:55:27.356002       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	I1123 08:55:27.484870       1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="no-preload-019660" podCIDRs=["10.244.0.0/24"]
	
	
	==> kube-proxy [4aea324009fd] <==
	I1123 08:55:29.781436       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I1123 08:55:29.882143       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I1123 08:55:29.882176       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
	E1123 08:55:29.882244       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I1123 08:55:30.206875       1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
		error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
		Perhaps ip6tables or your kernel needs to be upgraded.
	 >
	I1123 08:55:30.209951       1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
	I1123 08:55:30.210016       1 server_linux.go:132] "Using iptables Proxier"
	I1123 08:55:30.389394       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I1123 08:55:30.398584       1 server.go:527] "Version info" version="v1.34.1"
	I1123 08:55:30.411854       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1123 08:55:30.436371       1 config.go:106] "Starting endpoint slice config controller"
	I1123 08:55:30.436400       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I1123 08:55:30.436421       1 config.go:403] "Starting serviceCIDR config controller"
	I1123 08:55:30.436428       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I1123 08:55:30.441802       1 config.go:200] "Starting service config controller"
	I1123 08:55:30.441827       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I1123 08:55:30.456879       1 config.go:309] "Starting node config controller"
	I1123 08:55:30.457052       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I1123 08:55:30.457180       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I1123 08:55:30.537976       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I1123 08:55:30.542627       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	I1123 08:55:30.553889       1 shared_informer.go:356] "Caches are synced" controller="service config"
	
	
	==> kube-proxy [8c0537e27a6f] <==
	I1123 08:57:04.109885       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I1123 08:57:04.212001       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I1123 08:57:04.212377       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
	E1123 08:57:04.212492       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I1123 08:57:04.308881       1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
		error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
		Perhaps ip6tables or your kernel needs to be upgraded.
	 >
	I1123 08:57:04.309495       1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
	I1123 08:57:04.309923       1 server_linux.go:132] "Using iptables Proxier"
	I1123 08:57:04.335219       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I1123 08:57:04.338659       1 server.go:527] "Version info" version="v1.34.1"
	I1123 08:57:04.339118       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1123 08:57:04.356711       1 config.go:200] "Starting service config controller"
	I1123 08:57:04.358780       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I1123 08:57:04.357281       1 config.go:403] "Starting serviceCIDR config controller"
	I1123 08:57:04.360751       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I1123 08:57:04.359340       1 config.go:309] "Starting node config controller"
	I1123 08:57:04.361083       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I1123 08:57:04.361217       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I1123 08:57:04.357261       1 config.go:106] "Starting endpoint slice config controller"
	I1123 08:57:04.361454       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I1123 08:57:04.461112       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I1123 08:57:04.461168       1 shared_informer.go:356] "Caches are synced" controller="service config"
	I1123 08:57:04.466392       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	
	
	==> kube-scheduler [266be5a40ca6] <==
	I1123 08:56:59.176913       1 serving.go:386] Generated self-signed cert in-memory
	W1123 08:57:01.157665       1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W1123 08:57:01.157869       1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W1123 08:57:01.157944       1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
	W1123 08:57:01.158050       1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I1123 08:57:01.217478       1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.1"
	I1123 08:57:01.217604       1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1123 08:57:01.228584       1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I1123 08:57:01.229023       1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I1123 08:57:01.231067       1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
	I1123 08:57:01.231467       1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
	I1123 08:57:01.329575       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	
	
	==> kube-scheduler [57bb06d26ab6] <==
	E1123 08:55:19.477132       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
	E1123 08:55:19.476999       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
	E1123 08:55:19.477074       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
	E1123 08:55:19.478217       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
	E1123 08:55:19.478832       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
	E1123 08:55:19.479554       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
	E1123 08:55:19.480141       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
	E1123 08:55:19.480165       1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
	E1123 08:55:19.480360       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
	E1123 08:55:19.480372       1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
	E1123 08:55:19.480530       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
	E1123 08:55:19.480623       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
	E1123 08:55:19.481197       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
	E1123 08:55:19.482165       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
	E1123 08:55:20.289908       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
	E1123 08:55:20.337370       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
	E1123 08:55:20.366302       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
	E1123 08:55:20.425798       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
	E1123 08:55:20.483335       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
	E1123 08:55:20.494282       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
	I1123 08:55:23.055993       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I1123 08:56:16.316839       1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
	I1123 08:56:16.317595       1 server.go:263] "[graceful-termination] secure server has stopped listening"
	I1123 08:56:16.317742       1 server.go:265] "[graceful-termination] secure server is exiting"
	E1123 08:56:16.317790       1 run.go:72] "command failed" err="finished without leader elect"
	
	
	==> kubelet <==
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220241    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-k8s-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220309    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-flexvolume-dir\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220345    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-kubeconfig\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220366    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-usr-share-ca-certificates\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220392    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-ca-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220412    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-usr-share-ca-certificates\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220431    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-ca-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220451    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-k8s-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220473    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/0bd61e39ef27cab83cc049d81d34254c-kubeconfig\") pod \"kube-scheduler-no-preload-019660\" (UID: \"0bd61e39ef27cab83cc049d81d34254c\") " pod="kube-system/kube-scheduler-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.223516    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-certs\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.224048    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-data\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.281626    4357 apiserver.go:52] "Watching apiserver"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.354823    4357 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428002    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/92a336c6-9d96-4484-8903-9542086c298e-tmp\") pod \"storage-provisioner\" (UID: \"92a336c6-9d96-4484-8903-9542086c298e\") " pod="kube-system/storage-provisioner"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428072    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-xtables-lock\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428146    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-lib-modules\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.612741    4357 scope.go:117] "RemoveContainer" containerID="1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.874748    4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.877286    4357 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878430    4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-4965t_kubernetes-dashboard(d4a9e601-4647-40d6-a5d8-db1e8e067281): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878855    4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-4965t" podUID="d4a9e601-4647-40d6-a5d8-db1e8e067281"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.918928    4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.919810    4357 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921110    4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-tg8q5_kube-system(fb0df7df-58f1-4b52-8193-e19d66dd95bf): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921171    4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-tg8q5" podUID="fb0df7df-58f1-4b52-8193-e19d66dd95bf"
	
	
	==> kubernetes-dashboard [57ebcdb97431] <==
	2025/11/23 08:57:20 Starting overwatch
	2025/11/23 08:57:20 Using namespace: kubernetes-dashboard
	2025/11/23 08:57:20 Using in-cluster config to connect to apiserver
	2025/11/23 08:57:20 Using secret token for csrf signing
	2025/11/23 08:57:20 Initializing csrf token from kubernetes-dashboard-csrf secret
	2025/11/23 08:57:20 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
	2025/11/23 08:57:20 Successful initial request to the apiserver, version: v1.34.1
	2025/11/23 08:57:20 Generating JWE encryption key
	2025/11/23 08:57:20 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
	2025/11/23 08:57:20 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
	2025/11/23 08:57:21 Initializing JWE encryption key from synchronized object
	2025/11/23 08:57:21 Creating in-cluster Sidecar client
	2025/11/23 08:57:21 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2025/11/23 08:57:21 Serving insecurely on HTTP port: 9090
	2025/11/23 08:58:06 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	
	
	==> storage-provisioner [1f0a2f0aefa9] <==
	I1123 08:57:03.436717       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F1123 08:57:33.518183       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
	
	
	==> storage-provisioner [371de4a46890] <==
	I1123 08:58:09.007550       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1123 08:58:09.042381       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1123 08:58:09.044488       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	W1123 08:58:09.057366       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	

                                                
                                                
-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:269: (dbg) Run:  kubectl --context no-preload-019660 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:282: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: describe non-running pods <======
helpers_test.go:285: (dbg) Run:  kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1 (79.631105ms)

                                                
                                                
** stderr ** 
	Error from server (NotFound): pods "metrics-server-746fcd58dc-tg8q5" not found
	Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-4965t" not found

                                                
                                                
** /stderr **
helpers_test.go:287: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======>  post-mortem[TestStartStop/group/no-preload/serial/Pause]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:247: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:252: <<< TestStartStop/group/no-preload/serial/Pause FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======>  post-mortem[TestStartStop/group/no-preload/serial/Pause]: minikube logs <======
helpers_test.go:255: (dbg) Run:  out/minikube-linux-amd64 -p no-preload-019660 logs -n 25
helpers_test.go:255: (dbg) Done: out/minikube-linux-amd64 -p no-preload-019660 logs -n 25: (1.603322662s)
helpers_test.go:260: TestStartStop/group/no-preload/serial/Pause logs: 
-- stdout --
	
	==> Audit <==
	┌─────────┬────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│ COMMAND │                                                                                                        ARGS                                                                                                        │           PROFILE            │  USER   │ VERSION │     START TIME      │      END TIME       │
	├─────────┼────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ stop    │ -p no-preload-019660 --alsologtostderr -v=3                                                                                                                                                                        │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ addons  │ enable dashboard -p no-preload-019660 --images=MetricsScraper=registry.k8s.io/echoserver:1.4                                                                                                                       │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ start   │ -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.34.1                                                                                       │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
	│ addons  │ enable metrics-server -p embed-certs-059363 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain                                                                           │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ stop    │ -p embed-certs-059363 --alsologtostderr -v=3                                                                                                                                                                       │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ addons  │ enable dashboard -p embed-certs-059363 --images=MetricsScraper=registry.k8s.io/echoserver:1.4                                                                                                                      │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:56 UTC │
	│ start   │ -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2  --kubernetes-version=v1.34.1                                                                                        │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:56 UTC │ 23 Nov 25 08:57 UTC │
	│ addons  │ enable metrics-server -p default-k8s-diff-port-925051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain                                                                 │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ stop    │ -p default-k8s-diff-port-925051 --alsologtostderr -v=3                                                                                                                                                             │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ image   │ old-k8s-version-896471 image list --format=json                                                                                                                                                                    │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ pause   │ -p old-k8s-version-896471 --alsologtostderr -v=1                                                                                                                                                                   │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ unpause │ -p old-k8s-version-896471 --alsologtostderr -v=1                                                                                                                                                                   │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ delete  │ -p old-k8s-version-896471                                                                                                                                                                                          │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ delete  │ -p old-k8s-version-896471                                                                                                                                                                                          │ old-k8s-version-896471       │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ start   │ -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2  --kubernetes-version=v1.34.1 │ newest-cni-078196            │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │                     │
	│ addons  │ enable dashboard -p default-k8s-diff-port-925051 --images=MetricsScraper=registry.k8s.io/echoserver:1.4                                                                                                            │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ start   │ -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2  --kubernetes-version=v1.34.1                                                                      │ default-k8s-diff-port-925051 │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │                     │
	│ image   │ no-preload-019660 image list --format=json                                                                                                                                                                         │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ pause   │ -p no-preload-019660 --alsologtostderr -v=1                                                                                                                                                                        │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:57 UTC │ 23 Nov 25 08:57 UTC │
	│ image   │ embed-certs-059363 image list --format=json                                                                                                                                                                        │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ pause   │ -p embed-certs-059363 --alsologtostderr -v=1                                                                                                                                                                       │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ unpause │ -p no-preload-019660 --alsologtostderr -v=1                                                                                                                                                                        │ no-preload-019660            │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ unpause │ -p embed-certs-059363 --alsologtostderr -v=1                                                                                                                                                                       │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ delete  │ -p embed-certs-059363                                                                                                                                                                                              │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	│ delete  │ -p embed-certs-059363                                                                                                                                                                                              │ embed-certs-059363           │ jenkins │ v1.37.0 │ 23 Nov 25 08:58 UTC │ 23 Nov 25 08:58 UTC │
	└─────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/11/23 08:57:16
	Running on machine: ubuntu-20-agent-3
	Binary: Built with gc go1.25.3 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1123 08:57:16.853497   62480 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:57:16.853743   62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:57:16.853753   62480 out.go:374] Setting ErrFile to fd 2...
	I1123 08:57:16.853757   62480 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:57:16.854434   62480 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:57:16.855203   62480 out.go:368] Setting JSON to false
	I1123 08:57:16.856605   62480 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":5986,"bootTime":1763882251,"procs":197,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I1123 08:57:16.856696   62480 start.go:143] virtualization: kvm guest
	I1123 08:57:16.935723   62480 out.go:179] * [default-k8s-diff-port-925051] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	I1123 08:57:16.941914   62480 out.go:179]   - MINIKUBE_LOCATION=21966
	I1123 08:57:16.941916   62480 notify.go:221] Checking for updates...
	I1123 08:57:16.943817   62480 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1123 08:57:16.945573   62480 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:57:16.946745   62480 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:57:16.947938   62480 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I1123 08:57:16.949027   62480 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I1123 08:57:16.950511   62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:16.951037   62480 driver.go:422] Setting default libvirt URI to qemu:///system
	I1123 08:57:16.994324   62480 out.go:179] * Using the kvm2 driver based on existing profile
	I1123 08:57:16.995670   62480 start.go:309] selected driver: kvm2
	I1123 08:57:16.995691   62480 start.go:927] validating driver "kvm2" against &{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:16.995851   62480 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1123 08:57:16.997354   62480 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1123 08:57:16.997396   62480 cni.go:84] Creating CNI manager for ""
	I1123 08:57:16.997466   62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:16.997521   62480 start.go:353] cluster config:
	{Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:16.997662   62480 iso.go:125] acquiring lock: {Name:mk9cdb644d601a15f26caa6d527f7a63e06eb691 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1123 08:57:16.999287   62480 out.go:179] * Starting "default-k8s-diff-port-925051" primary control-plane node in "default-k8s-diff-port-925051" cluster
	I1123 08:57:16.538965   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:16.543216   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.543908   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.543934   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.544164   62034 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/config.json ...
	I1123 08:57:16.544418   62034 machine.go:94] provisionDockerMachine start ...
	I1123 08:57:16.547123   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.547583   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.547608   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.547766   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:16.547963   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:16.547972   62034 main.go:143] libmachine: About to run SSH command:
	hostname
	I1123 08:57:16.673771   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
	
	I1123 08:57:16.673806   62034 buildroot.go:166] provisioning hostname "embed-certs-059363"
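All of the provisioning steps in this block run shell commands on the guest through libmachine's "native" SSH client seen in the log lines above. As a rough illustration only, and not minikube's actual code, the flow reduces to something like the following Go sketch built on golang.org/x/crypto/ssh; the key path is a placeholder, and the user/address mirror this run:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder key path; real runs use .minikube/machines/<profile>/id_rsa.
	key, err := os.ReadFile("/path/to/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable for throwaway test VMs
	}
	client, err := ssh.Dial("tcp", "192.168.72.170:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.Output("hostname") // same first command as in the log
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("SSH cmd output: %s", out)
}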
	I1123 08:57:16.677167   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.677679   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.677711   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.677931   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:16.678192   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:16.678214   62034 main.go:143] libmachine: About to run SSH command:
	sudo hostname embed-certs-059363 && echo "embed-certs-059363" | sudo tee /etc/hostname
	I1123 08:57:16.832499   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: embed-certs-059363
	
	I1123 08:57:16.837251   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.837813   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.837855   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.838109   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:16.838438   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:16.838465   62034 main.go:143] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sembed-certs-059363' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 embed-certs-059363/g' /etc/hosts;
				else 
					echo '127.0.1.1 embed-certs-059363' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1123 08:57:16.972318   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 
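The shell snippet above keeps 127.0.1.1 pointing at the machine's hostname: it rewrites an existing 127.0.1.1 line or appends one if the hostname is missing. A minimal Go sketch of the same idea, purely illustrative and not the provisioner's implementation:

package main

import (
	"fmt"
	"strings"
)

// ensureHostsEntry mirrors the shell snippet above: if the hostname is not
// already present in /etc/hosts, either rewrite the 127.0.1.1 line or append one.
func ensureHostsEntry(hosts, hostname string) string {
	if strings.Contains(hosts, hostname) {
		return hosts
	}
	lines := strings.Split(hosts, "\n")
	for i, l := range lines {
		if strings.HasPrefix(l, "127.0.1.1") {
			lines[i] = "127.0.1.1 " + hostname
			return strings.Join(lines, "\n")
		}
	}
	return hosts + "127.0.1.1 " + hostname + "\n"
}

func main() {
	fmt.Print(ensureHostsEntry("127.0.0.1 localhost\n", "embed-certs-059363"))
}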
	I1123 08:57:16.972350   62034 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
	I1123 08:57:16.972374   62034 buildroot.go:174] setting up certificates
	I1123 08:57:16.972395   62034 provision.go:84] configureAuth start
	I1123 08:57:16.976994   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.977623   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.977662   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.980665   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.981134   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:16.981158   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:16.981351   62034 provision.go:143] copyHostCerts
	I1123 08:57:16.981431   62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
	I1123 08:57:16.981446   62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
	I1123 08:57:16.981523   62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
	I1123 08:57:16.981635   62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
	I1123 08:57:16.981646   62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
	I1123 08:57:16.981690   62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
	I1123 08:57:16.981769   62034 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
	I1123 08:57:16.981779   62034 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
	I1123 08:57:16.981817   62034 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
	I1123 08:57:16.981897   62034 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.embed-certs-059363 san=[127.0.0.1 192.168.72.170 embed-certs-059363 localhost minikube]
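provision.go:117 reports generating a server certificate signed by the minikube CA with the SANs listed above. The sketch below shows roughly what that entails with the standard crypto/x509 package; it creates a throwaway CA instead of loading ca.pem/ca-key.pem and elides error handling, so treat it as an outline rather than minikube's implementation:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	// Throwaway CA; the real run loads ca.pem / ca-key.pem instead.
	caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(26280 * time.Hour), // matches CertExpiration in the config dump
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Server cert with the SANs listed in the log line above.
	srvKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{Organization: []string{"jenkins.embed-certs-059363"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		DNSNames:     []string{"embed-certs-059363", "localhost", "minikube"},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.72.170")},
	}
	srvDER, _ := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: srvDER}) // server.pem body
}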
	I1123 08:57:17.112794   62034 provision.go:177] copyRemoteCerts
	I1123 08:57:17.112848   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1123 08:57:17.115853   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.116282   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.116308   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.116478   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:17.223809   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1123 08:57:17.266771   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I1123 08:57:17.305976   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1224 bytes)
	I1123 08:57:17.336820   62034 provision.go:87] duration metric: took 364.408049ms to configureAuth
	I1123 08:57:17.336863   62034 buildroot.go:189] setting minikube options for container-runtime
	I1123 08:57:17.337080   62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:17.339671   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.340090   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.340112   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.340318   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:17.340623   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:17.340643   62034 main.go:143] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I1123 08:57:17.463677   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
	
	I1123 08:57:17.463707   62034 buildroot.go:70] root file system type: tmpfs
	I1123 08:57:17.463928   62034 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I1123 08:57:17.467227   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.467655   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.467686   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.467940   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:17.468174   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:17.468268   62034 main.go:143] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I1123 08:57:17.602870   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I1123 08:57:17.606541   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.607111   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:17.607152   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:17.607427   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:17.607698   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:17.607716   62034 main.go:143] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
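The docker.service contents above are rendered on the host and only swapped in (mv, daemon-reload, restart) when they differ from what is already installed on the guest. A compressed text/template sketch of the rendering step, with a trimmed-down unit and made-up field names, purely for illustration:

package main

import (
	"log"
	"os"
	"text/template"
)

// Trimmed-down unit with made-up template fields; the real unit is the one
// printed in the log above.
const unitTmpl = `[Unit]
Description=Docker Application Container Engine
Requires=docker.socket

[Service]
Type=notify
Restart=always
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// \
  --tlsverify --tlscacert {{.CACert}} --tlscert {{.Cert}} --tlskey {{.Key}} \
  --label provider={{.Provider}}

[Install]
WantedBy=multi-user.target
`

func main() {
	t := template.Must(template.New("docker.service").Parse(unitTmpl))
	err := t.Execute(os.Stdout, map[string]string{
		"CACert":   "/etc/docker/ca.pem",
		"Cert":     "/etc/docker/server.pem",
		"Key":      "/etc/docker/server-key.pem",
		"Provider": "kvm2",
	})
	if err != nil {
		log.Fatal(err)
	}
}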
	I1123 08:57:19.186051   62386 start.go:364] duration metric: took 9.989286317s to acquireMachinesLock for "newest-cni-078196"
	I1123 08:57:19.186120   62386 start.go:93] Provisioning new machine with config: &{Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
	I1123 08:57:19.186215   62386 start.go:125] createHost starting for "" (driver="kvm2")
	W1123 08:57:15.950255   61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
	W1123 08:57:17.951890   61684 pod_ready.go:104] pod "coredns-66bc5c9577-nj6pk" is not "Ready", error: <nil>
	I1123 08:57:19.962419   61684 pod_ready.go:94] pod "coredns-66bc5c9577-nj6pk" is "Ready"
	I1123 08:57:19.962449   61684 pod_ready.go:86] duration metric: took 8.021055049s for pod "coredns-66bc5c9577-nj6pk" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.967799   61684 pod_ready.go:83] waiting for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.977812   61684 pod_ready.go:94] pod "etcd-no-preload-019660" is "Ready"
	I1123 08:57:19.977834   61684 pod_ready.go:86] duration metric: took 10.013782ms for pod "etcd-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.982683   61684 pod_ready.go:83] waiting for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.992798   61684 pod_ready.go:94] pod "kube-apiserver-no-preload-019660" is "Ready"
	I1123 08:57:19.992831   61684 pod_ready.go:86] duration metric: took 10.122708ms for pod "kube-apiserver-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:19.997939   61684 pod_ready.go:83] waiting for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.145706   61684 pod_ready.go:94] pod "kube-controller-manager-no-preload-019660" is "Ready"
	I1123 08:57:20.145742   61684 pod_ready.go:86] duration metric: took 147.777309ms for pod "kube-controller-manager-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.355205   61684 pod_ready.go:83] waiting for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.756189   61684 pod_ready.go:94] pod "kube-proxy-wlb9w" is "Ready"
	I1123 08:57:20.756259   61684 pod_ready.go:86] duration metric: took 400.985169ms for pod "kube-proxy-wlb9w" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:20.947647   61684 pod_ready.go:83] waiting for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:21.350509   61684 pod_ready.go:94] pod "kube-scheduler-no-preload-019660" is "Ready"
	I1123 08:57:21.350539   61684 pod_ready.go:86] duration metric: took 402.864201ms for pod "kube-scheduler-no-preload-019660" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:21.350552   61684 pod_ready.go:40] duration metric: took 9.416731421s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I1123 08:57:21.405369   61684 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
	I1123 08:57:21.409795   61684 out.go:179] * Done! kubectl is now configured to use "no-preload-019660" cluster and "default" namespace by default
	I1123 08:57:17.000521   62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:57:17.000560   62480 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
	I1123 08:57:17.000571   62480 cache.go:65] Caching tarball of preloaded images
	I1123 08:57:17.000667   62480 preload.go:238] Found /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
	I1123 08:57:17.000683   62480 cache.go:68] Finished verifying existence of preloaded tar for v1.34.1 on docker
	I1123 08:57:17.000806   62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
	I1123 08:57:17.001089   62480 start.go:360] acquireMachinesLock for default-k8s-diff-port-925051: {Name:mka7dedac533b164a995f5c19cff4f68d827bd22 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
	I1123 08:57:18.895461   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
	Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
	
	I1123 08:57:18.895495   62034 machine.go:97] duration metric: took 2.351059819s to provisionDockerMachine
	I1123 08:57:18.895519   62034 start.go:293] postStartSetup for "embed-certs-059363" (driver="kvm2")
	I1123 08:57:18.895547   62034 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1123 08:57:18.895631   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1123 08:57:18.899037   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:18.899549   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:18.899585   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:18.899747   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:18.995822   62034 ssh_runner.go:195] Run: cat /etc/os-release
	I1123 08:57:19.001215   62034 info.go:137] Remote host: Buildroot 2025.02
	I1123 08:57:19.001261   62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
	I1123 08:57:19.001335   62034 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
	I1123 08:57:19.001434   62034 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
	I1123 08:57:19.001551   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1123 08:57:19.015155   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:19.054248   62034 start.go:296] duration metric: took 158.692501ms for postStartSetup
	I1123 08:57:19.054294   62034 fix.go:56] duration metric: took 20.246777293s for fixHost
	I1123 08:57:19.058146   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.058727   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.058771   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.058998   62034 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:19.059317   62034 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.72.170 22 <nil> <nil>}
	I1123 08:57:19.059336   62034 main.go:143] libmachine: About to run SSH command:
	date +%s.%N
	I1123 08:57:19.185896   62034 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888239.115597688
	
	I1123 08:57:19.185919   62034 fix.go:216] guest clock: 1763888239.115597688
	I1123 08:57:19.185926   62034 fix.go:229] Guest: 2025-11-23 08:57:19.115597688 +0000 UTC Remote: 2025-11-23 08:57:19.054315183 +0000 UTC m=+20.376918396 (delta=61.282505ms)
	I1123 08:57:19.185941   62034 fix.go:200] guest clock delta is within tolerance: 61.282505ms
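fix.go compares the guest clock (read via `date +%s.%N` over SSH) against the host clock and only resyncs when the delta exceeds a tolerance; here the delta is about 61ms, so nothing is done. A minimal sketch of that comparison, with a stubbed sshRun helper standing in for the real SSH runner:

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

// sshRun stands in for executing a command on the guest over SSH; here it just
// returns the timestamp seen in the log above.
func sshRun(cmd string) (string, error) {
	return "1763888239.115597688", nil
}

func main() {
	local := time.Now()
	out, err := sshRun("date +%s.%N")
	if err != nil {
		panic(err)
	}
	secs, err := strconv.ParseFloat(strings.TrimSpace(out), 64)
	if err != nil {
		panic(err)
	}
	guest := time.Unix(0, int64(secs*float64(time.Second)))
	delta := guest.Sub(local)
	fmt.Printf("guest clock delta: %v (within 2s tolerance: %v)\n",
		delta, math.Abs(delta.Seconds()) < 2)
}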
	I1123 08:57:19.185962   62034 start.go:83] releasing machines lock for "embed-certs-059363", held for 20.37844631s
	I1123 08:57:19.189984   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.190596   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.190635   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.191288   62034 ssh_runner.go:195] Run: cat /version.json
	I1123 08:57:19.191295   62034 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1123 08:57:19.195221   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.195642   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.195676   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.195699   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.195883   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:19.196195   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:19.196264   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:19.196563   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:19.315903   62034 ssh_runner.go:195] Run: systemctl --version
	I1123 08:57:19.323178   62034 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	W1123 08:57:19.333159   62034 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
	I1123 08:57:19.333365   62034 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1123 08:57:19.356324   62034 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I1123 08:57:19.356355   62034 start.go:496] detecting cgroup driver to use...
	I1123 08:57:19.356469   62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:19.385750   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I1123 08:57:19.400434   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1123 08:57:19.414104   62034 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I1123 08:57:19.414182   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1123 08:57:19.433788   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:19.449538   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1123 08:57:19.464107   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:19.481469   62034 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1123 08:57:19.496533   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1123 08:57:19.511385   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I1123 08:57:19.525634   62034 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
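The series of `sed -i` runs above rewrites /etc/containerd/config.toml so containerd uses the cgroupfs driver, the runc v2 runtime, the expected pause image, and so on. Each edit is just a line-oriented regexp substitution; an equivalent of the SystemdCgroup edit in Go, for illustration only:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	conf := `[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
`
	// Same effect as: sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g'
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	fmt.Print(re.ReplaceAllString(conf, "${1}SystemdCgroup = false"))
}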
	I1123 08:57:19.544298   62034 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1123 08:57:19.560120   62034 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
	stdout:
	
	stderr:
	sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
	I1123 08:57:19.560179   62034 ssh_runner.go:195] Run: sudo modprobe br_netfilter
	I1123 08:57:19.576631   62034 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1123 08:57:19.592833   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:19.763221   62034 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1123 08:57:19.811223   62034 start.go:496] detecting cgroup driver to use...
	I1123 08:57:19.811335   62034 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I1123 08:57:19.833532   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:19.859627   62034 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I1123 08:57:19.884432   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:19.903805   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:19.921275   62034 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1123 08:57:19.960990   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:19.980317   62034 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:20.008661   62034 ssh_runner.go:195] Run: which cri-dockerd
	I1123 08:57:20.013631   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I1123 08:57:20.029302   62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I1123 08:57:20.057103   62034 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I1123 08:57:20.252891   62034 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I1123 08:57:20.490326   62034 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I1123 08:57:20.490458   62034 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I1123 08:57:20.526773   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:57:20.548985   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:20.740694   62034 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:57:21.481342   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1123 08:57:21.507341   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I1123 08:57:21.530703   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:21.555618   62034 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I1123 08:57:21.736442   62034 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I1123 08:57:21.910308   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:22.084793   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I1123 08:57:22.133988   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:22.150466   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:22.310923   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:22.333687   62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
	I1123 08:57:22.355809   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:22.373321   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:22.392686   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:22.568456   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:22.588895   62034 ssh_runner.go:195] Run: sudo journalctl --no-pager -u cri-docker.service
	I1123 08:57:22.604152   62034 retry.go:31] will retry after 1.30731135s: cri-docker.service not running
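The `retry.go:31] will retry after ...` lines come from a retry helper that re-runs a check with an increasing delay until it succeeds or gives up. A generic sketch of that pattern (not the actual retry package):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry re-runs f until it succeeds, sleeping between attempts and doubling
// the delay each time, similar in spirit to the "will retry after ..." lines.
func retry(maxAttempts int, initialDelay time.Duration, f func() error) error {
	delay := initialDelay
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = f(); err == nil {
			return nil
		}
		fmt.Printf("will retry after %v: %v\n", delay, err)
		time.Sleep(delay)
		delay *= 2
	}
	return err
}

func main() {
	calls := 0
	_ = retry(5, 500*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("cri-docker.service not running")
		}
		return nil
	})
	fmt.Println("succeeded after", calls, "attempts")
}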
	I1123 08:57:19.188404   62386 out.go:252] * Creating kvm2 VM (CPUs=2, Memory=3072MB, Disk=20000MB) ...
	I1123 08:57:19.188687   62386 start.go:159] libmachine.API.Create for "newest-cni-078196" (driver="kvm2")
	I1123 08:57:19.188735   62386 client.go:173] LocalClient.Create starting
	I1123 08:57:19.188852   62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem
	I1123 08:57:19.188919   62386 main.go:143] libmachine: Decoding PEM data...
	I1123 08:57:19.188950   62386 main.go:143] libmachine: Parsing certificate...
	I1123 08:57:19.189026   62386 main.go:143] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem
	I1123 08:57:19.189059   62386 main.go:143] libmachine: Decoding PEM data...
	I1123 08:57:19.189080   62386 main.go:143] libmachine: Parsing certificate...
	I1123 08:57:19.189577   62386 main.go:143] libmachine: creating domain...
	I1123 08:57:19.189595   62386 main.go:143] libmachine: creating network...
	I1123 08:57:19.191331   62386 main.go:143] libmachine: found existing default network
	I1123 08:57:19.191879   62386 main.go:143] libmachine: <network connections='3'>
	  <name>default</name>
	  <uuid>c61344c2-dba2-46dd-a21a-34776d235985</uuid>
	  <forward mode='nat'>
	    <nat>
	      <port start='1024' end='65535'/>
	    </nat>
	  </forward>
	  <bridge name='virbr0' stp='on' delay='0'/>
	  <mac address='52:54:00:10:a2:1d'/>
	  <ip address='192.168.122.1' netmask='255.255.255.0'>
	    <dhcp>
	      <range start='192.168.122.2' end='192.168.122.254'/>
	    </dhcp>
	  </ip>
	</network>
	
	I1123 08:57:19.193313   62386 network.go:206] using free private subnet 192.168.39.0/24: &{IP:192.168.39.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.39.0/24 Gateway:192.168.39.1 ClientMin:192.168.39.2 ClientMax:192.168.39.254 Broadcast:192.168.39.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001e04740}
	I1123 08:57:19.193434   62386 main.go:143] libmachine: defining private network:
	
	<network>
	  <name>mk-newest-cni-078196</name>
	  <dns enable='no'/>
	  <ip address='192.168.39.1' netmask='255.255.255.0'>
	    <dhcp>
	      <range start='192.168.39.2' end='192.168.39.253'/>
	    </dhcp>
	  </ip>
	</network>
	
	I1123 08:57:19.200866   62386 main.go:143] libmachine: creating private network mk-newest-cni-078196 192.168.39.0/24...
	I1123 08:57:19.291873   62386 main.go:143] libmachine: private network mk-newest-cni-078196 192.168.39.0/24 created
	I1123 08:57:19.292226   62386 main.go:143] libmachine: <network>
	  <name>mk-newest-cni-078196</name>
	  <uuid>d7bc9eb0-778c-4b77-a392-72f78dc9558b</uuid>
	  <bridge name='virbr1' stp='on' delay='0'/>
	  <mac address='52:54:00:20:cc:6a'/>
	  <dns enable='no'/>
	  <ip address='192.168.39.1' netmask='255.255.255.0'>
	    <dhcp>
	      <range start='192.168.39.2' end='192.168.39.253'/>
	    </dhcp>
	  </ip>
	</network>
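Before defining the private network, the driver picks a free private /24 (here 192.168.39.0/24) that does not collide with subnets already in use on the host. A simplified sketch of that selection, checking only against local interface addresses; the real code also tracks reservations and other minikube networks:

package main

import (
	"fmt"
	"net"
)

// subnetInUse reports whether any local interface already has an address
// inside the candidate CIDR. This only shows the basic idea.
func subnetInUse(cidr string) bool {
	_, candidate, err := net.ParseCIDR(cidr)
	if err != nil {
		return true
	}
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return true
	}
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok && candidate.Contains(ipnet.IP) {
			return true
		}
	}
	return false
}

func main() {
	for third := 39; third <= 254; third += 10 {
		cidr := fmt.Sprintf("192.168.%d.0/24", third)
		if !subnetInUse(cidr) {
			fmt.Println("using free private subnet", cidr)
			return
		}
	}
	fmt.Println("no free subnet found")
}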
	
	I1123 08:57:19.292287   62386 main.go:143] libmachine: setting up store path in /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
	I1123 08:57:19.292318   62386 main.go:143] libmachine: building disk image from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso
	I1123 08:57:19.292332   62386 common.go:152] Making disk image using store path: /home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:57:19.292416   62386 main.go:143] libmachine: Downloading /home/jenkins/minikube-integration/21966-18241/.minikube/cache/boot2docker.iso from file:///home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso...
	I1123 08:57:19.540811   62386 common.go:159] Creating ssh key: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa...
	I1123 08:57:19.628322   62386 common.go:165] Creating raw disk image: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk...
	I1123 08:57:19.628370   62386 main.go:143] libmachine: Writing magic tar header
	I1123 08:57:19.628409   62386 main.go:143] libmachine: Writing SSH key tar header
	I1123 08:57:19.628532   62386 common.go:179] Fixing permissions on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 ...
	I1123 08:57:19.628646   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196
	I1123 08:57:19.628680   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196 (perms=drwx------)
	I1123 08:57:19.628696   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube/machines
	I1123 08:57:19.628716   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube/machines (perms=drwxr-xr-x)
	I1123 08:57:19.628737   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:57:19.628753   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241/.minikube (perms=drwxr-xr-x)
	I1123 08:57:19.628766   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration/21966-18241
	I1123 08:57:19.628783   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration/21966-18241 (perms=drwxrwxr-x)
	I1123 08:57:19.628796   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins/minikube-integration
	I1123 08:57:19.628812   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins/minikube-integration (perms=drwxrwxr-x)
	I1123 08:57:19.628825   62386 main.go:143] libmachine: checking permissions on dir: /home/jenkins
	I1123 08:57:19.628845   62386 main.go:143] libmachine: setting executable bit set on /home/jenkins (perms=drwxr-xr-x)
	I1123 08:57:19.628862   62386 main.go:143] libmachine: checking permissions on dir: /home
	I1123 08:57:19.628874   62386 main.go:143] libmachine: skipping /home - not owner
	I1123 08:57:19.628886   62386 main.go:143] libmachine: defining domain...
	I1123 08:57:19.630619   62386 main.go:143] libmachine: defining domain using XML: 
	<domain type='kvm'>
	  <name>newest-cni-078196</name>
	  <memory unit='MiB'>3072</memory>
	  <vcpu>2</vcpu>
	  <features>
	    <acpi/>
	    <apic/>
	    <pae/>
	  </features>
	  <cpu mode='host-passthrough'>
	  </cpu>
	  <os>
	    <type>hvm</type>
	    <boot dev='cdrom'/>
	    <boot dev='hd'/>
	    <bootmenu enable='no'/>
	  </os>
	  <devices>
	    <disk type='file' device='cdrom'>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
	      <target dev='hdc' bus='scsi'/>
	      <readonly/>
	    </disk>
	    <disk type='file' device='disk'>
	      <driver name='qemu' type='raw' cache='default' io='threads' />
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
	      <target dev='hda' bus='virtio'/>
	    </disk>
	    <interface type='network'>
	      <source network='mk-newest-cni-078196'/>
	      <model type='virtio'/>
	    </interface>
	    <interface type='network'>
	      <source network='default'/>
	      <model type='virtio'/>
	    </interface>
	    <serial type='pty'>
	      <target port='0'/>
	    </serial>
	    <console type='pty'>
	      <target type='serial' port='0'/>
	    </console>
	    <rng model='virtio'>
	      <backend model='random'>/dev/random</backend>
	    </rng>
	  </devices>
	</domain>
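The domain XML above is handed to libvirt, which defines the domain and later starts it ("defining domain...", then "domain is now running"). With the Go libvirt bindings (import path assumed here to be libvirt.org/go/libvirt), the define-and-start step looks roughly like this sketch with a minimal placeholder XML:

package main

import (
	"log"

	libvirt "libvirt.org/go/libvirt"
)

func main() {
	conn, err := libvirt.NewConnect("qemu:///system")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Minimal placeholder XML; the real domain XML is the one dumped above.
	xml := `<domain type='kvm'>
  <name>example-sketch</name>
  <memory unit='MiB'>512</memory>
  <vcpu>1</vcpu>
  <os><type arch='x86_64'>hvm</type></os>
</domain>`

	dom, err := conn.DomainDefineXML(xml) // "defining domain..."
	if err != nil {
		log.Fatal(err)
	}
	defer dom.Free()

	if err := dom.Create(); err != nil { // "starting domain..."
		log.Fatal(err)
	}
	log.Println("domain is now running")
}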
	
	I1123 08:57:19.637651   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:7a:a4:6b in network default
	I1123 08:57:19.638554   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:19.638580   62386 main.go:143] libmachine: starting domain...
	I1123 08:57:19.638587   62386 main.go:143] libmachine: ensuring networks are active...
	I1123 08:57:19.639501   62386 main.go:143] libmachine: Ensuring network default is active
	I1123 08:57:19.640013   62386 main.go:143] libmachine: Ensuring network mk-newest-cni-078196 is active
	I1123 08:57:19.640748   62386 main.go:143] libmachine: getting domain XML...
	I1123 08:57:19.642270   62386 main.go:143] libmachine: starting domain XML:
	<domain type='kvm'>
	  <name>newest-cni-078196</name>
	  <uuid>67bf4217-d2fd-4841-a93c-e1581f4c5592</uuid>
	  <memory unit='KiB'>3145728</memory>
	  <currentMemory unit='KiB'>3145728</currentMemory>
	  <vcpu placement='static'>2</vcpu>
	  <os>
	    <type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
	    <boot dev='cdrom'/>
	    <boot dev='hd'/>
	    <bootmenu enable='no'/>
	  </os>
	  <features>
	    <acpi/>
	    <apic/>
	    <pae/>
	  </features>
	  <cpu mode='host-passthrough' check='none' migratable='on'/>
	  <clock offset='utc'/>
	  <on_poweroff>destroy</on_poweroff>
	  <on_reboot>restart</on_reboot>
	  <on_crash>destroy</on_crash>
	  <devices>
	    <emulator>/usr/bin/qemu-system-x86_64</emulator>
	    <disk type='file' device='cdrom'>
	      <driver name='qemu' type='raw'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/boot2docker.iso'/>
	      <target dev='hdc' bus='scsi'/>
	      <readonly/>
	      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
	    </disk>
	    <disk type='file' device='disk'>
	      <driver name='qemu' type='raw' io='threads'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/newest-cni-078196.rawdisk'/>
	      <target dev='hda' bus='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
	    </disk>
	    <controller type='usb' index='0' model='piix3-uhci'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
	    </controller>
	    <controller type='pci' index='0' model='pci-root'/>
	    <controller type='scsi' index='0' model='lsilogic'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
	    </controller>
	    <interface type='network'>
	      <mac address='52:54:00:d7:c1:0d'/>
	      <source network='mk-newest-cni-078196'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
	    </interface>
	    <interface type='network'>
	      <mac address='52:54:00:7a:a4:6b'/>
	      <source network='default'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
	    </interface>
	    <serial type='pty'>
	      <target type='isa-serial' port='0'>
	        <model name='isa-serial'/>
	      </target>
	    </serial>
	    <console type='pty'>
	      <target type='serial' port='0'/>
	    </console>
	    <input type='mouse' bus='ps2'/>
	    <input type='keyboard' bus='ps2'/>
	    <audio id='1' type='none'/>
	    <memballoon model='virtio'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
	    </memballoon>
	    <rng model='virtio'>
	      <backend model='random'>/dev/random</backend>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
	    </rng>
	  </devices>
	</domain>
	
	I1123 08:57:21.239037   62386 main.go:143] libmachine: waiting for domain to start...
	I1123 08:57:21.240876   62386 main.go:143] libmachine: domain is now running
	I1123 08:57:21.240900   62386 main.go:143] libmachine: waiting for IP...
	I1123 08:57:21.241736   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:21.242592   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:21.242611   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:21.243307   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:21.243346   62386 retry.go:31] will retry after 218.272628ms: waiting for domain to come up
	I1123 08:57:21.462945   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:21.463818   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:21.463835   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:21.464322   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:21.464353   62386 retry.go:31] will retry after 354.758102ms: waiting for domain to come up
	I1123 08:57:21.820932   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:21.821871   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:21.821891   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:21.822290   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:21.822322   62386 retry.go:31] will retry after 480.079581ms: waiting for domain to come up
	I1123 08:57:22.304134   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:22.305030   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:22.305053   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:22.305471   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:22.305501   62386 retry.go:31] will retry after 430.762091ms: waiting for domain to come up
	I1123 08:57:22.738137   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:22.739007   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:22.739022   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:22.739466   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:22.739499   62386 retry.go:31] will retry after 752.582052ms: waiting for domain to come up
	I1123 08:57:23.493414   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:23.494256   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:23.494271   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:23.494669   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:23.494696   62386 retry.go:31] will retry after 765.228537ms: waiting for domain to come up
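The "waiting for IP" loop polls the libvirt network's DHCP leases for the domain's MAC address until one appears, retrying with a short backoff as logged above. A sketch of that poll using the same assumed Go libvirt bindings; the network name and MAC come from this log, but the helper itself is illustrative:

package main

import (
	"fmt"
	"log"
	"time"

	libvirt "libvirt.org/go/libvirt"
)

// waitForIP polls the network's DHCP leases until the domain's MAC shows up,
// much like the "waiting for IP ... will retry after ..." loop above.
func waitForIP(conn *libvirt.Connect, network, mac string, timeout time.Duration) (string, error) {
	nw, err := conn.LookupNetworkByName(network)
	if err != nil {
		return "", err
	}
	defer nw.Free()

	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		leases, err := nw.GetDHCPLeases()
		if err == nil {
			for _, l := range leases {
				if l.Mac == mac && l.IPaddr != "" {
					return l.IPaddr, nil
				}
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return "", fmt.Errorf("no DHCP lease for %s in network %s", mac, network)
}

func main() {
	conn, err := libvirt.NewConnect("qemu:///system")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ip, err := waitForIP(conn, "mk-newest-cni-078196", "52:54:00:d7:c1:0d", 2*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("domain IP:", ip)
}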
	I1123 08:57:23.912604   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:23.930659   62034 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:23.946465   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:24.099133   62034 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:24.217974   62034 retry.go:31] will retry after 1.350292483s: cri-docker.service not running
	I1123 08:57:25.569520   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:25.588082   62034 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I1123 08:57:25.588166   62034 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I1123 08:57:25.595521   62034 start.go:564] Will wait 60s for crictl version
	I1123 08:57:25.595597   62034 ssh_runner.go:195] Run: which crictl
	I1123 08:57:25.600903   62034 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1123 08:57:25.642159   62034 start.go:580] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.5.1
	RuntimeApiVersion:  v1
	I1123 08:57:25.642260   62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:25.678324   62034 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:25.708968   62034 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
	I1123 08:57:25.712357   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:25.712811   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:25.712861   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:25.713088   62034 ssh_runner.go:195] Run: grep 192.168.72.1	host.minikube.internal$ /etc/hosts
	I1123 08:57:25.718506   62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.72.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:57:25.737282   62034 kubeadm.go:884] updating cluster {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I1123 08:57:25.737446   62034 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:57:25.737523   62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:25.759347   62034 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:57:25.759372   62034 docker.go:621] Images already preloaded, skipping extraction
	I1123 08:57:25.759440   62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:25.784761   62034 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:57:25.784786   62034 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:57:25.784796   62034 kubeadm.go:935] updating node { 192.168.72.170 8443 v1.34.1 docker true true} ...
	I1123 08:57:25.784906   62034 kubeadm.go:947] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=embed-certs-059363 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.72.170
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I1123 08:57:25.784959   62034 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I1123 08:57:25.840443   62034 cni.go:84] Creating CNI manager for ""
	I1123 08:57:25.840484   62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:25.840500   62034 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
	I1123 08:57:25.840520   62034 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.72.170 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:embed-certs-059363 NodeName:embed-certs-059363 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.72.170"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.72.170 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1123 08:57:25.840651   62034 kubeadm.go:196] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.72.170
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "embed-certs-059363"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.72.170"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.72.170"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.1
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1123 08:57:25.840731   62034 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
	I1123 08:57:25.855481   62034 binaries.go:51] Found k8s binaries, skipping transfer
	I1123 08:57:25.855562   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1123 08:57:25.869149   62034 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (319 bytes)
	I1123 08:57:25.890030   62034 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1123 08:57:25.913602   62034 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
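For reference, the kubeadm.yaml copied to the node above is a multi-document YAML stream (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration). Below is a minimal Go sketch, not part of minikube, that splits such a file and prints each document's apiVersion and kind; it assumes the gopkg.in/yaml.v3 module is available, and the file path is taken from the log line above.

// yamlkinds.go: list the documents inside a multi-document kubeadm config file.
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	// Path taken from the log above; adjust as needed.
	f, err := os.Open("/var/tmp/minikube/kubeadm.yaml.new")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dec := yaml.NewDecoder(f)
	for {
		var doc struct {
			APIVersion string `yaml:"apiVersion"`
			Kind       string `yaml:"kind"`
		}
		if err := dec.Decode(&doc); err == io.EOF {
			break // end of the YAML stream
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s %s\n", doc.APIVersion, doc.Kind)
	}
}

On the config shown above this would print four lines, one per embedded document.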
	I1123 08:57:25.939399   62034 ssh_runner.go:195] Run: grep 192.168.72.170	control-plane.minikube.internal$ /etc/hosts
	I1123 08:57:25.944187   62034 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.72.170	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:57:25.959980   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:26.112182   62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:57:26.150488   62034 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363 for IP: 192.168.72.170
	I1123 08:57:26.150514   62034 certs.go:195] generating shared ca certs ...
	I1123 08:57:26.150535   62034 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:26.150704   62034 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
	I1123 08:57:26.150759   62034 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
	I1123 08:57:26.150773   62034 certs.go:257] generating profile certs ...
	I1123 08:57:26.150910   62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/client.key
	I1123 08:57:26.151011   62034 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key.4b3bdd21
	I1123 08:57:26.151069   62034 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key
	I1123 08:57:26.151216   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
	W1123 08:57:26.151290   62034 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
	I1123 08:57:26.151305   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
	I1123 08:57:26.151344   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
	I1123 08:57:26.151380   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
	I1123 08:57:26.151415   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
	I1123 08:57:26.151483   62034 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:26.152356   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1123 08:57:26.201568   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I1123 08:57:26.246367   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1123 08:57:26.299610   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1123 08:57:26.334177   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1428 bytes)
	I1123 08:57:26.372484   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
	I1123 08:57:26.408684   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1123 08:57:26.449833   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/embed-certs-059363/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
	I1123 08:57:26.493006   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
	I1123 08:57:26.527341   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1123 08:57:26.564892   62034 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
	I1123 08:57:26.601408   62034 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1123 08:57:26.626296   62034 ssh_runner.go:195] Run: openssl version
	I1123 08:57:26.634385   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
	I1123 08:57:26.650265   62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
	I1123 08:57:26.657578   62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
	I1123 08:57:26.657632   62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
	I1123 08:57:26.666331   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
	I1123 08:57:26.682746   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1123 08:57:26.697978   62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:26.704544   62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:26.704612   62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:26.714575   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1123 08:57:26.730139   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
	I1123 08:57:26.745401   62034 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
	I1123 08:57:26.751383   62034 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
	I1123 08:57:26.751450   62034 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
	I1123 08:57:26.760273   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
	I1123 08:57:26.775477   62034 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I1123 08:57:26.782298   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
	I1123 08:57:26.790966   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
	I1123 08:57:26.800082   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
	I1123 08:57:26.809033   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
	I1123 08:57:26.818403   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
	I1123 08:57:26.827424   62034 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
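The `openssl x509 -checkend 86400` runs above ask, for each control-plane certificate, whether it expires within the next 24 hours. A rough standard-library Go equivalent is sketched below; the certificate path is one of those checked in the log, and the program is illustrative rather than minikube's own code.

// checkend.go: report whether a PEM certificate expires within 24 hours.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent of `-checkend 86400`: does NotAfter fall within the next 24h?
	if time.Now().Add(24 * time.Hour).After(cert.NotAfter) {
		fmt.Println("certificate will expire within 24h")
	} else {
		fmt.Println("certificate is valid for at least another 24h:", cert.NotAfter)
	}
}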
	I1123 08:57:26.836600   62034 kubeadm.go:401] StartCluster: {Name:embed-certs-059363 KeepContext:false EmbedCerts:true MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:embed-certs-059363 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:26.836750   62034 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I1123 08:57:26.857858   62034 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1123 08:57:26.872778   62034 kubeadm.go:417] found existing configuration files, will attempt cluster restart
	I1123 08:57:26.872804   62034 kubeadm.go:598] restartPrimaryControlPlane start ...
	I1123 08:57:26.872861   62034 ssh_runner.go:195] Run: sudo test -d /data/minikube
	I1123 08:57:26.887408   62034 kubeadm.go:131] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
	stdout:
	
	stderr:
	I1123 08:57:26.888007   62034 kubeconfig.go:47] verify endpoint returned: get endpoint: "embed-certs-059363" does not appear in /home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:57:26.888341   62034 kubeconfig.go:62] /home/jenkins/minikube-integration/21966-18241/kubeconfig needs updating (will repair): [kubeconfig missing "embed-certs-059363" cluster setting kubeconfig missing "embed-certs-059363" context setting]
	I1123 08:57:26.888835   62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:26.917419   62034 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
	I1123 08:57:26.931495   62034 kubeadm.go:635] The running cluster does not require reconfiguration: 192.168.72.170
	I1123 08:57:26.931533   62034 kubeadm.go:1161] stopping kube-system containers ...
	I1123 08:57:26.931598   62034 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I1123 08:57:26.956424   62034 docker.go:484] Stopping containers: [2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58]
	I1123 08:57:26.956515   62034 ssh_runner.go:195] Run: docker stop 2777bc8dc9d8 1e7fd2e1de3d 766f92e6b85c a4e7b815df08 b78206bd7ac1 246623b92954 0f2d7243cca6 5dc3731f3932 12f2dd5a9262 45882ff88b2f 8437f8a92375 866aa8687d31 230241a2edf7 1c8b359647bb 038fcdc4f7f6 049872fe8a58
	I1123 08:57:26.982476   62034 ssh_runner.go:195] Run: sudo systemctl stop kubelet
	I1123 08:57:27.015459   62034 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1123 08:57:27.030576   62034 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I1123 08:57:27.030600   62034 kubeadm.go:158] found existing configuration files:
	
	I1123 08:57:27.030658   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I1123 08:57:27.043658   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I1123 08:57:27.043723   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I1123 08:57:27.058167   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I1123 08:57:27.074375   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I1123 08:57:27.074449   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I1123 08:57:27.091119   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I1123 08:57:27.106772   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I1123 08:57:27.106876   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I1123 08:57:27.124425   62034 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I1123 08:57:27.140001   62034 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I1123 08:57:27.140061   62034 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I1123 08:57:27.154930   62034 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1123 08:57:27.169444   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:27.328883   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:24.261134   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:24.261787   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:24.261806   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:24.262181   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:24.262219   62386 retry.go:31] will retry after 1.137472458s: waiting for domain to come up
	I1123 08:57:25.401597   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:25.402373   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:25.402395   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:25.402716   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:25.402745   62386 retry.go:31] will retry after 1.246843188s: waiting for domain to come up
	I1123 08:57:26.651383   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:26.652402   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:26.652423   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:26.652983   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:26.653027   62386 retry.go:31] will retry after 1.576847177s: waiting for domain to come up
	I1123 08:57:28.231063   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:28.231892   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:28.231907   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:28.232342   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:28.232376   62386 retry.go:31] will retry after 2.191968701s: waiting for domain to come up
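The `retry.go:31] will retry after …` lines above come from a generic retry-with-backoff loop that keeps re-listing the libvirt domain's interfaces (first from the DHCP lease, then from ARP) until an IP appears. The sketch below shows that general pattern; the helper name, delays, and jitter are illustrative assumptions, not minikube's actual implementation.

// retrysketch.go: retry a check with a growing, jittered delay until it succeeds.
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

func retryUntil(deadline time.Time, check func() error) error {
	delay := 500 * time.Millisecond
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("gave up: %w", err)
		}
		jitter := time.Duration(rand.Int63n(int64(delay)))
		fmt.Printf("will retry after %v: %v\n", delay+jitter, err)
		time.Sleep(delay + jitter)
		if delay < 5*time.Second {
			delay *= 2 // back off, roughly matching the growing intervals in the log
		}
	}
}

func main() {
	attempts := 0
	err := retryUntil(time.Now().Add(10*time.Second), func() error {
		attempts++
		if attempts < 3 {
			return errors.New("waiting for domain to come up")
		}
		return nil
	})
	fmt.Println("result:", err)
}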
	I1123 08:57:29.072122   62034 ssh_runner.go:235] Completed: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml": (1.743194687s)
	I1123 08:57:29.072199   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:29.363322   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:29.437121   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
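Note that the restart path above does not run a full `kubeadm init`; it replays individual init phases (certs, kubeconfig, kubelet-start, control-plane, etcd) against the generated config. A hedged Go sketch of driving the same sequence is shown below; the binary and config paths are taken from the log, and this would have to run as root inside the guest VM.

// phases.go: re-run selected kubeadm init phases against a prepared config file.
package main

import (
	"log"
	"os/exec"
)

func main() {
	phases := [][]string{
		{"init", "phase", "certs", "all"},
		{"init", "phase", "kubeconfig", "all"},
		{"init", "phase", "kubelet-start"},
		{"init", "phase", "control-plane", "all"},
		{"init", "phase", "etcd", "local"},
	}
	for _, p := range phases {
		args := append(p, "--config", "/var/tmp/minikube/kubeadm.yaml")
		cmd := exec.Command("/var/lib/minikube/binaries/v1.34.1/kubeadm", args...)
		out, err := cmd.CombinedOutput()
		log.Printf("kubeadm %v:\n%s", p, out)
		if err != nil {
			log.Fatal(err) // stop at the first failing phase, as the restart flow does
		}
	}
}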
	I1123 08:57:29.519180   62034 api_server.go:52] waiting for apiserver process to appear ...
	I1123 08:57:29.519372   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:30.019409   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:30.519973   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:31.019428   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:31.127420   62034 api_server.go:72] duration metric: took 1.608256805s to wait for apiserver process to appear ...
	I1123 08:57:31.127455   62034 api_server.go:88] waiting for apiserver healthz status ...
	I1123 08:57:31.127480   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:31.128203   62034 api_server.go:269] stopped: https://192.168.72.170:8443/healthz: Get "https://192.168.72.170:8443/healthz": dial tcp 192.168.72.170:8443: connect: connection refused
	I1123 08:57:31.627812   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:30.426848   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:30.427811   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:30.427838   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:30.428254   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:30.428293   62386 retry.go:31] will retry after 2.66246372s: waiting for domain to come up
	I1123 08:57:33.093605   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:33.094467   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:33.094487   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:33.095017   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:33.095058   62386 retry.go:31] will retry after 2.368738453s: waiting for domain to come up
	I1123 08:57:34.364730   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W1123 08:57:34.364762   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I1123 08:57:34.364778   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:34.401309   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	W1123 08:57:34.401349   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 403:
	{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
	I1123 08:57:34.627677   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:34.639017   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
	[+]ping ok
	[+]log ok
	[-]etcd failed: reason withheld
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W1123 08:57:34.639052   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[-]etcd failed: reason withheld
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I1123 08:57:35.127669   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:35.133471   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W1123 08:57:35.133500   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I1123 08:57:35.628190   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:35.637607   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	W1123 08:57:35.637636   62034 api_server.go:103] status: https://192.168.72.170:8443/healthz returned error 500:
	[+]ping ok
	[+]log ok
	[+]etcd ok
	[+]poststarthook/start-apiserver-admission-initializer ok
	[+]poststarthook/generic-apiserver-start-informers ok
	[+]poststarthook/priority-and-fairness-config-consumer ok
	[+]poststarthook/priority-and-fairness-filter ok
	[+]poststarthook/storage-object-count-tracker-hook ok
	[+]poststarthook/start-apiextensions-informers ok
	[+]poststarthook/start-apiextensions-controllers ok
	[+]poststarthook/crd-informer-synced ok
	[+]poststarthook/start-system-namespaces-controller ok
	[+]poststarthook/start-cluster-authentication-info-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
	[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
	[+]poststarthook/start-legacy-token-tracking-controller ok
	[+]poststarthook/start-service-ip-repair-controllers ok
	[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
	[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
	[+]poststarthook/priority-and-fairness-config-producer ok
	[+]poststarthook/bootstrap-controller ok
	[+]poststarthook/start-kubernetes-service-cidr-controller ok
	[+]poststarthook/aggregator-reload-proxy-client-cert ok
	[+]poststarthook/start-kube-aggregator-informers ok
	[+]poststarthook/apiservice-status-local-available-controller ok
	[+]poststarthook/apiservice-status-remote-available-controller ok
	[+]poststarthook/apiservice-registration-controller ok
	[+]poststarthook/apiservice-discovery-controller ok
	[+]poststarthook/kube-apiserver-autoregistration ok
	[+]autoregister-completion ok
	[+]poststarthook/apiservice-openapi-controller ok
	[+]poststarthook/apiservice-openapiv3-controller ok
	healthz check failed
	I1123 08:57:36.128401   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:36.134007   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
	ok
	I1123 08:57:36.142338   62034 api_server.go:141] control plane version: v1.34.1
	I1123 08:57:36.142374   62034 api_server.go:131] duration metric: took 5.014912025s to wait for apiserver health ...
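The loop above polls https://192.168.72.170:8443/healthz until the 403 and 500 responses turn into a plain 200 "ok". A minimal Go sketch of that polling pattern follows; TLS verification is skipped here only to keep the example short (minikube authenticates against the cluster CA), and the timeout and interval values are assumptions.

// healthz.go: poll the apiserver /healthz endpoint until it reports ok.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout:   5 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get("https://192.168.72.170:8443/healthz")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Printf("healthz: %d %s\n", resp.StatusCode, body)
			if resp.StatusCode == http.StatusOK {
				return // apiserver is healthy
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	log.Fatal("apiserver never became healthy")
}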
	I1123 08:57:36.142383   62034 cni.go:84] Creating CNI manager for ""
	I1123 08:57:36.142394   62034 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:36.144644   62034 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
	I1123 08:57:36.146156   62034 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I1123 08:57:36.172405   62034 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I1123 08:57:36.206117   62034 system_pods.go:43] waiting for kube-system pods to appear ...
	I1123 08:57:36.212151   62034 system_pods.go:59] 8 kube-system pods found
	I1123 08:57:36.212192   62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
	I1123 08:57:36.212201   62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
	I1123 08:57:36.212209   62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I1123 08:57:36.212215   62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
	I1123 08:57:36.212219   62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
	I1123 08:57:36.212227   62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I1123 08:57:36.212254   62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1123 08:57:36.212263   62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
	I1123 08:57:36.212272   62034 system_pods.go:74] duration metric: took 6.125497ms to wait for pod list to return data ...
	I1123 08:57:36.212281   62034 node_conditions.go:102] verifying NodePressure condition ...
	I1123 08:57:36.216399   62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
	I1123 08:57:36.216437   62034 node_conditions.go:123] node cpu capacity is 2
	I1123 08:57:36.216455   62034 node_conditions.go:105] duration metric: took 4.163261ms to run NodePressure ...
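The kube-system pod survey above (system_pods.go) lists each pod and its readiness conditions right after the control plane comes back. The client-go sketch below reproduces that check outside minikube; the kubeconfig path is the one used by this test run, taken from the log, and the output format is an assumption.

// syspods.go: list kube-system pods and whether each is Ready.
package main

import (
	"context"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/minikube-integration/21966-18241/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	pods, err := cs.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pods.Items {
		ready := false
		for _, c := range p.Status.Conditions {
			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
				ready = true
			}
		}
		fmt.Printf("%-55s phase=%-9s ready=%v\n", p.Name, p.Status.Phase, ready)
	}
}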
	I1123 08:57:36.216523   62034 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
	I1123 08:57:36.499954   62034 kubeadm.go:729] waiting for restarted kubelet to initialise ...
	I1123 08:57:36.504225   62034 kubeadm.go:744] kubelet initialised
	I1123 08:57:36.504271   62034 kubeadm.go:745] duration metric: took 4.279186ms waiting for restarted kubelet to initialise ...
	I1123 08:57:36.504293   62034 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1123 08:57:36.525819   62034 ops.go:34] apiserver oom_adj: -16
	I1123 08:57:36.525847   62034 kubeadm.go:602] duration metric: took 9.653035112s to restartPrimaryControlPlane
	I1123 08:57:36.525859   62034 kubeadm.go:403] duration metric: took 9.689268169s to StartCluster
	I1123 08:57:36.525879   62034 settings.go:142] acquiring lock: {Name:mk0efabf238cb985c892ac3a9b32ac206b9f2336 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:36.525969   62034 settings.go:150] Updating kubeconfig:  /home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:57:36.527038   62034 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/kubeconfig: {Name:mk4ff9c09d937b27d93688a0eb9fbee2087daab0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:36.527368   62034 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.72.170 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}
	I1123 08:57:36.527458   62034 addons.go:527] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:true default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
	I1123 08:57:36.527579   62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:36.527600   62034 addons.go:70] Setting metrics-server=true in profile "embed-certs-059363"
	I1123 08:57:36.527599   62034 addons.go:70] Setting default-storageclass=true in profile "embed-certs-059363"
	I1123 08:57:36.527579   62034 addons.go:70] Setting storage-provisioner=true in profile "embed-certs-059363"
	I1123 08:57:36.527644   62034 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "embed-certs-059363"
	I1123 08:57:36.527635   62034 addons.go:70] Setting dashboard=true in profile "embed-certs-059363"
	I1123 08:57:36.527665   62034 addons.go:239] Setting addon dashboard=true in "embed-certs-059363"
	W1123 08:57:36.527679   62034 addons.go:248] addon dashboard should already be in state true
	I1123 08:57:36.527666   62034 cache.go:107] acquiring lock: {Name:mk5578ff0020d8c222414769e0c7ca17014d52f1 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1123 08:57:36.527671   62034 addons.go:239] Setting addon storage-provisioner=true in "embed-certs-059363"
	W1123 08:57:36.527702   62034 addons.go:248] addon storage-provisioner should already be in state true
	I1123 08:57:36.527733   62034 cache.go:115] /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 exists
	I1123 08:57:36.527637   62034 addons.go:239] Setting addon metrics-server=true in "embed-certs-059363"
	I1123 08:57:36.527748   62034 cache.go:96] cache image "gcr.io/k8s-minikube/gvisor-addon:2" -> "/home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2" took 96.823µs
	I1123 08:57:36.527758   62034 cache.go:80] save to tar file gcr.io/k8s-minikube/gvisor-addon:2 -> /home/jenkins/minikube-integration/21966-18241/.minikube/cache/images/amd64/gcr.io/k8s-minikube/gvisor-addon_2 succeeded
	I1123 08:57:36.527763   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.527766   62034 cache.go:87] Successfully saved all images to host disk.
	W1123 08:57:36.527758   62034 addons.go:248] addon metrics-server should already be in state true
	I1123 08:57:36.527796   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.527738   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.527934   62034 config.go:182] Loaded profile config "embed-certs-059363": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:36.529271   62034 out.go:179] * Verifying Kubernetes components...
	I1123 08:57:36.530935   62034 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:36.531022   62034 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:36.532294   62034 addons.go:239] Setting addon default-storageclass=true in "embed-certs-059363"
	W1123 08:57:36.532326   62034 addons.go:248] addon default-storageclass should already be in state true
	I1123 08:57:36.532348   62034 host.go:66] Checking if "embed-certs-059363" exists ...
	I1123 08:57:36.533191   62034 out.go:179]   - Using image registry.k8s.io/echoserver:1.4
	I1123 08:57:36.533215   62034 out.go:179]   - Using image gcr.io/k8s-minikube/storage-provisioner:v5
	I1123 08:57:36.533195   62034 out.go:179]   - Using image fake.domain/registry.k8s.io/echoserver:1.4
	I1123 08:57:36.534073   62034 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml
	I1123 08:57:36.534091   62034 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
	I1123 08:57:36.534667   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.535129   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.535347   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.535858   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.536061   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-apiservice.yaml
	I1123 08:57:36.536084   62034 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
	I1123 08:57:36.536132   62034 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml
	I1123 08:57:36.536145   62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
	I1123 08:57:36.536880   62034 out.go:179]   - Using image docker.io/kubernetesui/dashboard:v2.7.0
	I1123 08:57:36.537788   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.538214   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-ns.yaml
	I1123 08:57:36.538249   62034 ssh_runner.go:362] scp dashboard/dashboard-ns.yaml --> /etc/kubernetes/addons/dashboard-ns.yaml (759 bytes)
	I1123 08:57:36.538746   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.538816   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.539088   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.540090   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.540146   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.541026   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.541069   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.541120   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.541158   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.541257   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.541514   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.542423   62034 main.go:143] libmachine: domain embed-certs-059363 has defined MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.542896   62034 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:9b:54:8b", ip: ""} in network mk-embed-certs-059363: {Iface:virbr4 ExpiryTime:2025-11-23 09:57:12 +0000 UTC Type:0 Mac:52:54:00:9b:54:8b Iaid: IPaddr:192.168.72.170 Prefix:24 Hostname:embed-certs-059363 Clientid:01:52:54:00:9b:54:8b}
	I1123 08:57:36.542931   62034 main.go:143] libmachine: domain embed-certs-059363 has defined IP address 192.168.72.170 and MAC address 52:54:00:9b:54:8b in network mk-embed-certs-059363
	I1123 08:57:36.543116   62034 sshutil.go:53] new ssh client: &{IP:192.168.72.170 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/embed-certs-059363/id_rsa Username:docker}
	I1123 08:57:36.844170   62034 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:57:36.869742   62034 node_ready.go:35] waiting up to 6m0s for node "embed-certs-059363" to be "Ready" ...
	I1123 08:57:36.960323   62034 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:57:36.960371   62034 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:57:36.960379   62034 cache_images.go:264] succeeded pushing to: embed-certs-059363
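The preload step above lists the guest's Docker images and skips image loading when everything required is already present. A minimal Go sketch of that check, purely illustrative (the expected-image list here is a small sample, not the full set minikube verifies):

	// Sketch of the preload check the log shows: list the images on the guest
	// and report whether every expected image is already present.
	package main

	import (
		"fmt"
		"os/exec"
		"strings"
	)

	func imagesPreloaded(expected []string) (bool, error) {
		out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
		if err != nil {
			return false, err
		}
		have := map[string]bool{}
		for _, line := range strings.Fields(string(out)) {
			have[line] = true
		}
		for _, img := range expected {
			if !have[img] {
				return false, nil
			}
		}
		return true, nil
	}

	func main() {
		ok, err := imagesPreloaded([]string{
			"registry.k8s.io/kube-apiserver:v1.34.1",
			"registry.k8s.io/etcd:3.6.4-0",
		})
		fmt.Println(ok, err)
	}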
	I1123 08:57:37.000609   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
	I1123 08:57:37.008492   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
	I1123 08:57:37.017692   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
	I1123 08:57:37.017713   62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1825 bytes)
	I1123 08:57:37.020529   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrole.yaml
	I1123 08:57:37.020561   62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrole.yaml --> /etc/kubernetes/addons/dashboard-clusterrole.yaml (1001 bytes)
	I1123 08:57:37.074670   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml
	I1123 08:57:37.074710   62034 ssh_runner.go:362] scp dashboard/dashboard-clusterrolebinding.yaml --> /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml (1018 bytes)
	I1123 08:57:37.076076   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
	I1123 08:57:37.076096   62034 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
	I1123 08:57:37.132446   62034 addons.go:436] installing /etc/kubernetes/addons/metrics-server-service.yaml
	I1123 08:57:37.132466   62034 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
	I1123 08:57:37.134322   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-configmap.yaml
	I1123 08:57:37.134339   62034 ssh_runner.go:362] scp dashboard/dashboard-configmap.yaml --> /etc/kubernetes/addons/dashboard-configmap.yaml (837 bytes)
	I1123 08:57:37.188291   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-dp.yaml
	I1123 08:57:37.188311   62034 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/dashboard-dp.yaml (4201 bytes)
	I1123 08:57:37.200924   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
	I1123 08:57:37.265084   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-role.yaml
	I1123 08:57:37.265109   62034 ssh_runner.go:362] scp dashboard/dashboard-role.yaml --> /etc/kubernetes/addons/dashboard-role.yaml (1724 bytes)
	I1123 08:57:37.341532   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-rolebinding.yaml
	I1123 08:57:37.341559   62034 ssh_runner.go:362] scp dashboard/dashboard-rolebinding.yaml --> /etc/kubernetes/addons/dashboard-rolebinding.yaml (1046 bytes)
	I1123 08:57:37.425079   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-sa.yaml
	I1123 08:57:37.425110   62034 ssh_runner.go:362] scp dashboard/dashboard-sa.yaml --> /etc/kubernetes/addons/dashboard-sa.yaml (837 bytes)
	I1123 08:57:37.510704   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-secret.yaml
	I1123 08:57:37.510748   62034 ssh_runner.go:362] scp dashboard/dashboard-secret.yaml --> /etc/kubernetes/addons/dashboard-secret.yaml (1389 bytes)
	I1123 08:57:37.600957   62034 addons.go:436] installing /etc/kubernetes/addons/dashboard-svc.yaml
	I1123 08:57:37.600982   62034 ssh_runner.go:362] scp dashboard/dashboard-svc.yaml --> /etc/kubernetes/addons/dashboard-svc.yaml (1294 bytes)
	I1123 08:57:37.663098   62034 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml
	I1123 08:57:38.728547   62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (1.720019273s)
	I1123 08:57:38.824306   62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (1.623332944s)
	I1123 08:57:38.824374   62034 addons.go:495] Verifying addon metrics-server=true in "embed-certs-059363"
	W1123 08:57:38.886375   62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
	I1123 08:57:39.122207   62034 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/dashboard-ns.yaml -f /etc/kubernetes/addons/dashboard-clusterrole.yaml -f /etc/kubernetes/addons/dashboard-clusterrolebinding.yaml -f /etc/kubernetes/addons/dashboard-configmap.yaml -f /etc/kubernetes/addons/dashboard-dp.yaml -f /etc/kubernetes/addons/dashboard-role.yaml -f /etc/kubernetes/addons/dashboard-rolebinding.yaml -f /etc/kubernetes/addons/dashboard-sa.yaml -f /etc/kubernetes/addons/dashboard-secret.yaml -f /etc/kubernetes/addons/dashboard-svc.yaml: (1.459038888s)
	I1123 08:57:39.124248   62034 out.go:179] * Some dashboard features require the metrics-server addon. To enable all features please run:
	
		minikube -p embed-certs-059363 addons enable metrics-server
	
	I1123 08:57:39.126125   62034 out.go:179] * Enabled addons: default-storageclass, storage-provisioner, metrics-server, dashboard
	I1123 08:57:35.465742   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:35.466525   62386 main.go:143] libmachine: no network interface addresses found for domain newest-cni-078196 (source=lease)
	I1123 08:57:35.466540   62386 main.go:143] libmachine: trying to list again with source=arp
	I1123 08:57:35.467003   62386 main.go:143] libmachine: unable to find current IP address of domain newest-cni-078196 in network mk-newest-cni-078196 (interfaces detected: [])
	I1123 08:57:35.467033   62386 retry.go:31] will retry after 4.454598391s: waiting for domain to come up
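The retry.go:31 line above is the driver's poll loop waiting for the new VM to pick up an IP address. A rough Go sketch of that retry-with-backoff pattern; the function name, timeout, and jitter policy here are illustrative assumptions, not minikube's exact implementation:

	// Minimal sketch of the loop behind the "will retry after ..." lines:
	// poll a condition with a capped, jittered backoff until it succeeds or times out.
	package main

	import (
		"errors"
		"fmt"
		"math/rand"
		"time"
	)

	func retryUntil(timeout time.Duration, check func() error) error {
		deadline := time.Now().Add(timeout)
		backoff := time.Second
		for {
			err := check()
			if err == nil {
				return nil
			}
			if time.Now().After(deadline) {
				return fmt.Errorf("timed out: %w", err)
			}
			wait := backoff + time.Duration(rand.Int63n(int64(backoff)))
			fmt.Printf("will retry after %s: %v\n", wait, err)
			time.Sleep(wait)
			if backoff < 10*time.Second {
				backoff *= 2
			}
		}
	}

	func main() {
		attempts := 0
		_ = retryUntil(30*time.Second, func() error {
			attempts++
			if attempts < 3 {
				return errors.New("waiting for domain to come up")
			}
			return nil
		})
	}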
	I1123 08:57:42.467134   62480 start.go:364] duration metric: took 25.46601127s to acquireMachinesLock for "default-k8s-diff-port-925051"
	I1123 08:57:42.467190   62480 start.go:96] Skipping create...Using existing machine configuration
	I1123 08:57:42.467196   62480 fix.go:54] fixHost starting: 
	I1123 08:57:42.469900   62480 fix.go:112] recreateIfNeeded on default-k8s-diff-port-925051: state=Stopped err=<nil>
	W1123 08:57:42.469946   62480 fix.go:138] unexpected machine state, will restart: <nil>
	I1123 08:57:39.127521   62034 addons.go:530] duration metric: took 2.600069679s for enable addons: enabled=[default-storageclass storage-provisioner metrics-server dashboard]
	W1123 08:57:41.375432   62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
	I1123 08:57:39.922903   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:39.923713   62386 main.go:143] libmachine: domain newest-cni-078196 has current primary IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:39.923726   62386 main.go:143] libmachine: found domain IP: 192.168.39.87
	I1123 08:57:39.923732   62386 main.go:143] libmachine: reserving static IP address...
	I1123 08:57:39.924129   62386 main.go:143] libmachine: unable to find host DHCP lease matching {name: "newest-cni-078196", mac: "52:54:00:d7:c1:0d", ip: "192.168.39.87"} in network mk-newest-cni-078196
	I1123 08:57:40.154544   62386 main.go:143] libmachine: reserved static IP address 192.168.39.87 for domain newest-cni-078196
	I1123 08:57:40.154569   62386 main.go:143] libmachine: waiting for SSH...
	I1123 08:57:40.154577   62386 main.go:143] libmachine: Getting to WaitForSSH function...
	I1123 08:57:40.157877   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.158255   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:minikube Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.158277   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.158452   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.158677   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.158690   62386 main.go:143] libmachine: About to run SSH command:
	exit 0
	I1123 08:57:40.266068   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:40.266484   62386 main.go:143] libmachine: domain creation complete
	I1123 08:57:40.268135   62386 machine.go:94] provisionDockerMachine start ...
	I1123 08:57:40.270701   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.271083   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.271106   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.271243   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.271436   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.271446   62386 main.go:143] libmachine: About to run SSH command:
	hostname
	I1123 08:57:40.377718   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
	
	I1123 08:57:40.377749   62386 buildroot.go:166] provisioning hostname "newest-cni-078196"
	I1123 08:57:40.381682   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.382224   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.382274   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.382549   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.382750   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.382763   62386 main.go:143] libmachine: About to run SSH command:
	sudo hostname newest-cni-078196 && echo "newest-cni-078196" | sudo tee /etc/hostname
	I1123 08:57:40.510920   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: newest-cni-078196
	
	I1123 08:57:40.514470   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.514870   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.514901   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.515119   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.515349   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.515373   62386 main.go:143] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\snewest-cni-078196' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 newest-cni-078196/g' /etc/hosts;
				else 
					echo '127.0.1.1 newest-cni-078196' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1123 08:57:40.644008   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:57:40.644045   62386 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
	I1123 08:57:40.644119   62386 buildroot.go:174] setting up certificates
	I1123 08:57:40.644132   62386 provision.go:84] configureAuth start
	I1123 08:57:40.647940   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.648462   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.648495   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.651488   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.651967   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.652002   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.652153   62386 provision.go:143] copyHostCerts
	I1123 08:57:40.652210   62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
	I1123 08:57:40.652252   62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
	I1123 08:57:40.652340   62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
	I1123 08:57:40.652511   62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
	I1123 08:57:40.652528   62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
	I1123 08:57:40.652580   62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
	I1123 08:57:40.652714   62386 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
	I1123 08:57:40.652735   62386 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
	I1123 08:57:40.652778   62386 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
	I1123 08:57:40.652872   62386 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.newest-cni-078196 san=[127.0.0.1 192.168.39.87 localhost minikube newest-cni-078196]
	I1123 08:57:40.723606   62386 provision.go:177] copyRemoteCerts
	I1123 08:57:40.723663   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1123 08:57:40.726615   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.727086   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.727115   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.727301   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:40.819420   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I1123 08:57:40.852505   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
	I1123 08:57:40.888555   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
	I1123 08:57:40.923977   62386 provision.go:87] duration metric: took 279.828188ms to configureAuth
	I1123 08:57:40.924014   62386 buildroot.go:189] setting minikube options for container-runtime
	I1123 08:57:40.924275   62386 config.go:182] Loaded profile config "newest-cni-078196": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:57:40.927517   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.927915   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:40.927938   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:40.928098   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:40.928391   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:40.928404   62386 main.go:143] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I1123 08:57:41.042673   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
	
	I1123 08:57:41.042707   62386 buildroot.go:70] root file system type: tmpfs
	I1123 08:57:41.042873   62386 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I1123 08:57:41.046445   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.046989   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:41.047094   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.047391   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:41.047683   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:41.047769   62386 main.go:143] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I1123 08:57:41.175224   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I1123 08:57:41.178183   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.178676   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:41.178702   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:41.178902   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:41.179152   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:41.179171   62386 main.go:143] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
	I1123 08:57:42.186295   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
	Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
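The provisioner renders the docker unit to docker.service.new and only swaps it in and restarts Docker when it differs from the live unit (here the live unit did not exist yet, so diff fails and the new file is installed). A hedged Go sketch of that install-if-changed pattern, assuming root privileges and standard systemd paths; not the minikube code itself:

	// Write the rendered unit to docker.service.new, then replace the live unit
	// and restart docker only when the two files differ (or the live unit is missing).
	package main

	import (
		"fmt"
		"os"
		"os/exec"
	)

	func installUnitIfChanged(rendered []byte) error {
		const live = "/lib/systemd/system/docker.service"
		const staged = live + ".new"

		if err := os.WriteFile(staged, rendered, 0o644); err != nil {
			return err
		}
		// `diff -u` exits non-zero when the files differ or the live unit does not exist.
		if err := exec.Command("diff", "-u", live, staged).Run(); err == nil {
			return os.Remove(staged) // identical: nothing to do
		}
		if err := os.Rename(staged, live); err != nil {
			return err
		}
		for _, args := range [][]string{
			{"systemctl", "daemon-reload"},
			{"systemctl", "enable", "docker"},
			{"systemctl", "restart", "docker"},
		} {
			if err := exec.Command(args[0], args[1:]...).Run(); err != nil {
				return err
			}
		}
		return nil
	}

	func main() {
		if err := installUnitIfChanged([]byte("[Unit]\nDescription=Docker Application Container Engine\n")); err != nil {
			fmt.Println(err)
		}
	}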
	
	I1123 08:57:42.186331   62386 machine.go:97] duration metric: took 1.918179804s to provisionDockerMachine
	I1123 08:57:42.186347   62386 client.go:176] duration metric: took 22.997600307s to LocalClient.Create
	I1123 08:57:42.186371   62386 start.go:167] duration metric: took 22.997685492s to libmachine.API.Create "newest-cni-078196"
	I1123 08:57:42.186382   62386 start.go:293] postStartSetup for "newest-cni-078196" (driver="kvm2")
	I1123 08:57:42.186396   62386 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1123 08:57:42.186475   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1123 08:57:42.189917   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.190351   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.190388   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.190560   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:42.283393   62386 ssh_runner.go:195] Run: cat /etc/os-release
	I1123 08:57:42.289999   62386 info.go:137] Remote host: Buildroot 2025.02
	I1123 08:57:42.290030   62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
	I1123 08:57:42.290117   62386 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
	I1123 08:57:42.290218   62386 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
	I1123 08:57:42.290354   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1123 08:57:42.306924   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:42.343081   62386 start.go:296] duration metric: took 156.683452ms for postStartSetup
	I1123 08:57:42.347012   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.347579   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.347619   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.347939   62386 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/config.json ...
	I1123 08:57:42.348140   62386 start.go:128] duration metric: took 23.161911818s to createHost
	I1123 08:57:42.350835   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.351301   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.351336   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.351513   62386 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:42.351791   62386 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.39.87 22 <nil> <nil>}
	I1123 08:57:42.351806   62386 main.go:143] libmachine: About to run SSH command:
	date +%s.%N
	I1123 08:57:42.466967   62386 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888262.440217357
	
	I1123 08:57:42.466993   62386 fix.go:216] guest clock: 1763888262.440217357
	I1123 08:57:42.467001   62386 fix.go:229] Guest: 2025-11-23 08:57:42.440217357 +0000 UTC Remote: 2025-11-23 08:57:42.348151583 +0000 UTC m=+33.279616417 (delta=92.065774ms)
	I1123 08:57:42.467025   62386 fix.go:200] guest clock delta is within tolerance: 92.065774ms
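The clock check compares the guest's `date +%s.%N` output against the host clock and accepts the machine when the skew stays within a tolerance (about 92ms in this run). An illustrative Go sketch of that comparison; the one-second tolerance is an assumption, and the float parse trades away sub-microsecond precision:

	// Compare a guest "seconds.nanoseconds" timestamp against the host clock.
	package main

	import (
		"fmt"
		"strconv"
		"time"
	)

	func guestClockDelta(guestStamp string, hostNow time.Time) (time.Duration, error) {
		secs, err := strconv.ParseFloat(guestStamp, 64) // loses sub-microsecond precision; fine for a skew check
		if err != nil {
			return 0, err
		}
		guest := time.Unix(0, int64(secs*float64(time.Second)))
		return hostNow.Sub(guest), nil
	}

	func main() {
		delta, err := guestClockDelta("1763888262.440217357", time.Unix(0, 1763888262348151583))
		if err != nil {
			panic(err)
		}
		const tolerance = time.Second // assumed tolerance for this sketch
		fmt.Printf("delta=%v within tolerance=%v: %v\n", delta, tolerance, delta.Abs() <= tolerance)
	}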
	I1123 08:57:42.467033   62386 start.go:83] releasing machines lock for "newest-cni-078196", held for 23.280957089s
	I1123 08:57:42.471032   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.471501   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.471531   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.472456   62386 ssh_runner.go:195] Run: cat /version.json
	I1123 08:57:42.472536   62386 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1123 08:57:42.477011   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.477058   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.477612   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.477644   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.479664   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:42.479706   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:42.480287   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:42.480869   62386 sshutil.go:53] new ssh client: &{IP:192.168.39.87 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/newest-cni-078196/id_rsa Username:docker}
	I1123 08:57:42.593772   62386 ssh_runner.go:195] Run: systemctl --version
	I1123 08:57:42.603410   62386 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	W1123 08:57:42.614510   62386 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
	I1123 08:57:42.614601   62386 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1123 08:57:42.645967   62386 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I1123 08:57:42.646003   62386 start.go:496] detecting cgroup driver to use...
	I1123 08:57:42.646138   62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:42.678706   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I1123 08:57:42.694705   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1123 08:57:42.713341   62386 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I1123 08:57:42.713419   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1123 08:57:42.729085   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:42.747983   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1123 08:57:42.768036   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:57:42.784061   62386 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1123 08:57:42.803711   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1123 08:57:42.822385   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I1123 08:57:42.837748   62386 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I1123 08:57:42.858942   62386 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1123 08:57:42.873841   62386 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
	stdout:
	
	stderr:
	sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
	I1123 08:57:42.873924   62386 ssh_runner.go:195] Run: sudo modprobe br_netfilter
	I1123 08:57:42.888503   62386 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1123 08:57:42.902894   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:43.087215   62386 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1123 08:57:43.137011   62386 start.go:496] detecting cgroup driver to use...
	I1123 08:57:43.137115   62386 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I1123 08:57:43.166541   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:43.198142   62386 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I1123 08:57:43.220890   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:57:43.239791   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:43.260304   62386 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1123 08:57:43.296702   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:57:43.316993   62386 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:57:43.348493   62386 ssh_runner.go:195] Run: which cri-dockerd
	I1123 08:57:43.353715   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I1123 08:57:43.367872   62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I1123 08:57:43.391806   62386 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I1123 08:57:43.570922   62386 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I1123 08:57:43.771497   62386 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I1123 08:57:43.771641   62386 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I1123 08:57:43.796840   62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:57:43.815699   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:43.997592   62386 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:57:44.541819   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1123 08:57:44.559735   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I1123 08:57:44.577562   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:44.595133   62386 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I1123 08:57:44.759253   62386 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I1123 08:57:44.927897   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:45.126443   62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I1123 08:57:45.161272   62386 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:57:45.179561   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:45.365439   62386 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:57:45.512591   62386 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:57:45.537318   62386 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I1123 08:57:45.537393   62386 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I1123 08:57:45.546577   62386 start.go:564] Will wait 60s for crictl version
	I1123 08:57:45.546657   62386 ssh_runner.go:195] Run: which crictl
	I1123 08:57:45.553243   62386 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1123 08:57:45.597074   62386 start.go:580] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.5.1
	RuntimeApiVersion:  v1
	I1123 08:57:45.597163   62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:45.640023   62386 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:57:45.668409   62386 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
	I1123 08:57:45.671742   62386 main.go:143] libmachine: domain newest-cni-078196 has defined MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:45.672152   62386 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:d7:c1:0d", ip: ""} in network mk-newest-cni-078196: {Iface:virbr1 ExpiryTime:2025-11-23 09:57:35 +0000 UTC Type:0 Mac:52:54:00:d7:c1:0d Iaid: IPaddr:192.168.39.87 Prefix:24 Hostname:newest-cni-078196 Clientid:01:52:54:00:d7:c1:0d}
	I1123 08:57:45.672174   62386 main.go:143] libmachine: domain newest-cni-078196 has defined IP address 192.168.39.87 and MAC address 52:54:00:d7:c1:0d in network mk-newest-cni-078196
	I1123 08:57:45.672386   62386 ssh_runner.go:195] Run: grep 192.168.39.1	host.minikube.internal$ /etc/hosts
	I1123 08:57:45.677208   62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.39.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
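That bash one-liner rewrites /etc/hosts so host.minikube.internal resolves to the network gateway (192.168.39.1). A small Go equivalent, purely illustrative and not minikube's code, that drops any stale entry and appends a fresh one:

	// Ensure a single "<ip>\t<name>" entry exists in a hosts file.
	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	func ensureHostEntry(path, ip, name string) error {
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		var kept []string
		for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
			if !strings.HasSuffix(line, "\t"+name) {
				kept = append(kept, line)
			}
		}
		kept = append(kept, fmt.Sprintf("%s\t%s", ip, name))
		return os.WriteFile(path, []byte(strings.Join(kept, "\n")+"\n"), 0o644)
	}

	func main() {
		if err := ensureHostEntry("/etc/hosts", "192.168.39.1", "host.minikube.internal"); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}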
	I1123 08:57:45.697750   62386 out.go:179]   - kubeadm.pod-network-cidr=10.42.0.0/16
	I1123 08:57:42.471379   62480 out.go:252] * Restarting existing kvm2 VM for "default-k8s-diff-port-925051" ...
	I1123 08:57:42.471439   62480 main.go:143] libmachine: starting domain...
	I1123 08:57:42.471451   62480 main.go:143] libmachine: ensuring networks are active...
	I1123 08:57:42.472371   62480 main.go:143] libmachine: Ensuring network default is active
	I1123 08:57:42.473208   62480 main.go:143] libmachine: Ensuring network mk-default-k8s-diff-port-925051 is active
	I1123 08:57:42.474158   62480 main.go:143] libmachine: getting domain XML...
	I1123 08:57:42.476521   62480 main.go:143] libmachine: starting domain XML:
	<domain type='kvm'>
	  <name>default-k8s-diff-port-925051</name>
	  <uuid>faa8704c-25e4-4eae-b827-cb508c4f9f54</uuid>
	  <memory unit='KiB'>3145728</memory>
	  <currentMemory unit='KiB'>3145728</currentMemory>
	  <vcpu placement='static'>2</vcpu>
	  <os>
	    <type arch='x86_64' machine='pc-i440fx-jammy'>hvm</type>
	    <boot dev='cdrom'/>
	    <boot dev='hd'/>
	    <bootmenu enable='no'/>
	  </os>
	  <features>
	    <acpi/>
	    <apic/>
	    <pae/>
	  </features>
	  <cpu mode='host-passthrough' check='none' migratable='on'/>
	  <clock offset='utc'/>
	  <on_poweroff>destroy</on_poweroff>
	  <on_reboot>restart</on_reboot>
	  <on_crash>destroy</on_crash>
	  <devices>
	    <emulator>/usr/bin/qemu-system-x86_64</emulator>
	    <disk type='file' device='cdrom'>
	      <driver name='qemu' type='raw'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/boot2docker.iso'/>
	      <target dev='hdc' bus='scsi'/>
	      <readonly/>
	      <address type='drive' controller='0' bus='0' target='0' unit='2'/>
	    </disk>
	    <disk type='file' device='disk'>
	      <driver name='qemu' type='raw' io='threads'/>
	      <source file='/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/default-k8s-diff-port-925051.rawdisk'/>
	      <target dev='hda' bus='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
	    </disk>
	    <controller type='usb' index='0' model='piix3-uhci'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
	    </controller>
	    <controller type='pci' index='0' model='pci-root'/>
	    <controller type='scsi' index='0' model='lsilogic'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
	    </controller>
	    <interface type='network'>
	      <mac address='52:54:00:19:c7:db'/>
	      <source network='mk-default-k8s-diff-port-925051'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
	    </interface>
	    <interface type='network'>
	      <mac address='52:54:00:fd:c0:c5'/>
	      <source network='default'/>
	      <model type='virtio'/>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
	    </interface>
	    <serial type='pty'>
	      <target type='isa-serial' port='0'>
	        <model name='isa-serial'/>
	      </target>
	    </serial>
	    <console type='pty'>
	      <target type='serial' port='0'/>
	    </console>
	    <input type='mouse' bus='ps2'/>
	    <input type='keyboard' bus='ps2'/>
	    <audio id='1' type='none'/>
	    <memballoon model='virtio'>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
	    </memballoon>
	    <rng model='virtio'>
	      <backend model='random'>/dev/random</backend>
	      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
	    </rng>
	  </devices>
	</domain>
	
	I1123 08:57:44.035948   62480 main.go:143] libmachine: waiting for domain to start...
	I1123 08:57:44.037946   62480 main.go:143] libmachine: domain is now running
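Restarting the existing profile amounts to re-submitting the domain XML above to libvirt and booting it. A hedged sketch of the same steps driven through the virsh CLI rather than the libmachine driver; for an already-defined domain, only the start half is strictly needed, and the XML placeholder here is truncated for illustration:

	// Define a libvirt domain from XML and start it via the virsh CLI.
	package main

	import (
		"fmt"
		"os"
		"os/exec"
	)

	func defineAndStart(name, domainXML string) error {
		f, err := os.CreateTemp("", name+"-*.xml")
		if err != nil {
			return err
		}
		defer os.Remove(f.Name())
		if _, err := f.WriteString(domainXML); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
		// `virsh define` registers the persistent domain; `virsh start` boots it.
		if out, err := exec.Command("virsh", "define", f.Name()).CombinedOutput(); err != nil {
			return fmt.Errorf("define: %v: %s", err, out)
		}
		if out, err := exec.Command("virsh", "start", name).CombinedOutput(); err != nil {
			return fmt.Errorf("start: %v: %s", err, out)
		}
		return nil
	}

	func main() {
		xml := "<domain type='kvm'><name>default-k8s-diff-port-925051</name></domain>" // truncated placeholder
		if err := defineAndStart("default-k8s-diff-port-925051", xml); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}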
	I1123 08:57:44.037965   62480 main.go:143] libmachine: waiting for IP...
	I1123 08:57:44.039014   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.039860   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has current primary IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.039874   62480 main.go:143] libmachine: found domain IP: 192.168.83.137
	I1123 08:57:44.039880   62480 main.go:143] libmachine: reserving static IP address...
	I1123 08:57:44.040364   62480 main.go:143] libmachine: found host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:57:44.040404   62480 main.go:143] libmachine: skip adding static IP to network mk-default-k8s-diff-port-925051 - found existing host DHCP lease matching {name: "default-k8s-diff-port-925051", mac: "52:54:00:19:c7:db", ip: "192.168.83.137"}
	I1123 08:57:44.040416   62480 main.go:143] libmachine: reserved static IP address 192.168.83.137 for domain default-k8s-diff-port-925051
	I1123 08:57:44.040421   62480 main.go:143] libmachine: waiting for SSH...
	I1123 08:57:44.040425   62480 main.go:143] libmachine: Getting to WaitForSSH function...
	I1123 08:57:44.043072   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.043526   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:55:37 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:57:44.043551   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:57:44.043747   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:57:44.044097   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:57:44.044119   62480 main.go:143] libmachine: About to run SSH command:
	exit 0
	W1123 08:57:43.874417   62034 node_ready.go:57] node "embed-certs-059363" has "Ready":"False" status (will retry)
	I1123 08:57:44.875063   62034 node_ready.go:49] node "embed-certs-059363" is "Ready"
	I1123 08:57:44.875101   62034 node_ready.go:38] duration metric: took 8.005319911s for node "embed-certs-059363" to be "Ready" ...
	I1123 08:57:44.875126   62034 api_server.go:52] waiting for apiserver process to appear ...
	I1123 08:57:44.875194   62034 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:57:44.908964   62034 api_server.go:72] duration metric: took 8.381553502s to wait for apiserver process to appear ...
	I1123 08:57:44.908993   62034 api_server.go:88] waiting for apiserver healthz status ...
	I1123 08:57:44.909013   62034 api_server.go:253] Checking apiserver healthz at https://192.168.72.170:8443/healthz ...
	I1123 08:57:44.924580   62034 api_server.go:279] https://192.168.72.170:8443/healthz returned 200:
	ok
	I1123 08:57:44.927212   62034 api_server.go:141] control plane version: v1.34.1
	I1123 08:57:44.927254   62034 api_server.go:131] duration metric: took 18.252447ms to wait for apiserver health ...
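The health wait above simply polls the apiserver's /healthz endpoint until it answers 200 with "ok". An illustrative Go version of that poll; the timeout is an assumption, and InsecureSkipVerify stands in for the profile's client certificates, which the real check uses:

	// Poll an apiserver /healthz URL until it returns HTTP 200 or the timeout expires.
	package main

	import (
		"crypto/tls"
		"fmt"
		"net/http"
		"time"
	)

	func waitForHealthz(url string, timeout time.Duration) error {
		client := &http.Client{
			Timeout:   5 * time.Second,
			Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
		}
		deadline := time.Now().Add(timeout)
		for time.Now().Before(deadline) {
			resp, err := client.Get(url)
			if err == nil {
				resp.Body.Close()
				if resp.StatusCode == http.StatusOK {
					return nil
				}
			}
			time.Sleep(time.Second)
		}
		return fmt.Errorf("apiserver at %s not healthy within %s", url, timeout)
	}

	func main() {
		if err := waitForHealthz("https://192.168.72.170:8443/healthz", 2*time.Minute); err != nil {
			fmt.Println(err)
		}
	}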
	I1123 08:57:44.927266   62034 system_pods.go:43] waiting for kube-system pods to appear ...
	I1123 08:57:44.936682   62034 system_pods.go:59] 8 kube-system pods found
	I1123 08:57:44.936719   62034 system_pods.go:61] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
	I1123 08:57:44.936727   62034 system_pods.go:61] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
	I1123 08:57:44.936746   62034 system_pods.go:61] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I1123 08:57:44.936754   62034 system_pods.go:61] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
	I1123 08:57:44.936762   62034 system_pods.go:61] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
	I1123 08:57:44.936772   62034 system_pods.go:61] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I1123 08:57:44.936780   62034 system_pods.go:61] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1123 08:57:44.936786   62034 system_pods.go:61] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
	I1123 08:57:44.936794   62034 system_pods.go:74] duration metric: took 9.520766ms to wait for pod list to return data ...
	I1123 08:57:44.936804   62034 default_sa.go:34] waiting for default service account to be created ...
	I1123 08:57:44.948188   62034 default_sa.go:45] found service account: "default"
	I1123 08:57:44.948225   62034 default_sa.go:55] duration metric: took 11.401143ms for default service account to be created ...
	I1123 08:57:44.948255   62034 system_pods.go:116] waiting for k8s-apps to be running ...
	I1123 08:57:44.951719   62034 system_pods.go:86] 8 kube-system pods found
	I1123 08:57:44.951754   62034 system_pods.go:89] "coredns-66bc5c9577-665gz" [95fc7e21-4842-4c82-8e6a-aacd9494cdaf] Running
	I1123 08:57:44.951774   62034 system_pods.go:89] "etcd-embed-certs-059363" [fa029d3b-b887-4f84-9479-84020bb36c03] Running
	I1123 08:57:44.951787   62034 system_pods.go:89] "kube-apiserver-embed-certs-059363" [4949b4bd-7e15-4092-90e1-215419673b50] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
	I1123 08:57:44.951803   62034 system_pods.go:89] "kube-controller-manager-embed-certs-059363" [4bf4b11c-274e-4bc4-b4f7-39b40f9ea51b] Running
	I1123 08:57:44.951812   62034 system_pods.go:89] "kube-proxy-sjvcr" [73a4ab24-78f1-4223-9e4b-fbf39c225875] Running
	I1123 08:57:44.951821   62034 system_pods.go:89] "kube-scheduler-embed-certs-059363" [2ad27af2-3f59-44b5-b888-c5fee6b5db68] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
	I1123 08:57:44.951837   62034 system_pods.go:89] "metrics-server-746fcd58dc-jc8k8" [93a43ecf-712d-44ba-a709-9bc223d0990e] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
	I1123 08:57:44.951850   62034 system_pods.go:89] "storage-provisioner" [3a6c5ffc-b8ab-4fc3-bdaa-048e59ab4766] Running
	I1123 08:57:44.951862   62034 system_pods.go:126] duration metric: took 3.598572ms to wait for k8s-apps to be running ...
	I1123 08:57:44.951872   62034 system_svc.go:44] waiting for kubelet service to be running ....
	I1123 08:57:44.951940   62034 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:57:44.981007   62034 system_svc.go:56] duration metric: took 29.122206ms WaitForService to wait for kubelet
	I1123 08:57:44.981059   62034 kubeadm.go:587] duration metric: took 8.453653674s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
	I1123 08:57:44.981082   62034 node_conditions.go:102] verifying NodePressure condition ...
	I1123 08:57:44.985604   62034 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
	I1123 08:57:44.985627   62034 node_conditions.go:123] node cpu capacity is 2
	I1123 08:57:44.985639   62034 node_conditions.go:105] duration metric: took 4.549928ms to run NodePressure ...
	I1123 08:57:44.985653   62034 start.go:242] waiting for startup goroutines ...
	I1123 08:57:44.985663   62034 start.go:247] waiting for cluster config update ...
	I1123 08:57:44.985678   62034 start.go:256] writing updated cluster config ...
	I1123 08:57:44.986007   62034 ssh_runner.go:195] Run: rm -f paused
	I1123 08:57:44.992429   62034 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I1123 08:57:44.997825   62034 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.005294   62034 pod_ready.go:94] pod "coredns-66bc5c9577-665gz" is "Ready"
	I1123 08:57:45.005321   62034 pod_ready.go:86] duration metric: took 7.470836ms for pod "coredns-66bc5c9577-665gz" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.008602   62034 pod_ready.go:83] waiting for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.017355   62034 pod_ready.go:94] pod "etcd-embed-certs-059363" is "Ready"
	I1123 08:57:45.017385   62034 pod_ready.go:86] duration metric: took 8.758566ms for pod "etcd-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:45.020737   62034 pod_ready.go:83] waiting for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	W1123 08:57:47.036716   62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
	I1123 08:57:45.699160   62386 kubeadm.go:884] updating cluster {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I1123 08:57:45.699335   62386 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:57:45.699438   62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:45.722240   62386 docker.go:691] Got preloaded images: 
	I1123 08:57:45.722266   62386 docker.go:697] registry.k8s.io/kube-apiserver:v1.34.1 wasn't preloaded
	I1123 08:57:45.722318   62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
	I1123 08:57:45.737539   62386 ssh_runner.go:195] Run: which lz4
	I1123 08:57:45.742521   62386 ssh_runner.go:195] Run: stat -c "%s %y" /preloaded.tar.lz4
	I1123 08:57:45.748122   62386 ssh_runner.go:352] existence check for /preloaded.tar.lz4: stat -c "%s %y" /preloaded.tar.lz4: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/preloaded.tar.lz4': No such file or directory
	I1123 08:57:45.748156   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 --> /preloaded.tar.lz4 (353378914 bytes)
	I1123 08:57:47.397908   62386 docker.go:655] duration metric: took 1.655425847s to copy over tarball
	I1123 08:57:47.398050   62386 ssh_runner.go:195] Run: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4
	I1123 08:57:49.041182   62386 ssh_runner.go:235] Completed: sudo tar --xattrs --xattrs-include security.capability -I lz4 -C /var -xf /preloaded.tar.lz4: (1.643095229s)
	I1123 08:57:49.041212   62386 ssh_runner.go:146] rm: /preloaded.tar.lz4
	I1123 08:57:49.084378   62386 ssh_runner.go:195] Run: sudo cat /var/lib/docker/image/overlay2/repositories.json
	I1123 08:57:49.103760   62386 ssh_runner.go:362] scp memory --> /var/lib/docker/image/overlay2/repositories.json (2632 bytes)
	W1123 08:57:49.601859   62034 pod_ready.go:104] pod "kube-apiserver-embed-certs-059363" is not "Ready", error: <nil>
	I1123 08:57:50.104106   62034 pod_ready.go:94] pod "kube-apiserver-embed-certs-059363" is "Ready"
	I1123 08:57:50.104158   62034 pod_ready.go:86] duration metric: took 5.08337291s for pod "kube-apiserver-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.107546   62034 pod_ready.go:83] waiting for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.115455   62034 pod_ready.go:94] pod "kube-controller-manager-embed-certs-059363" is "Ready"
	I1123 08:57:50.115500   62034 pod_ready.go:86] duration metric: took 7.928459ms for pod "kube-controller-manager-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.119972   62034 pod_ready.go:83] waiting for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.127595   62034 pod_ready.go:94] pod "kube-proxy-sjvcr" is "Ready"
	I1123 08:57:50.127628   62034 pod_ready.go:86] duration metric: took 7.626091ms for pod "kube-proxy-sjvcr" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.773984   62034 pod_ready.go:83] waiting for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.786424   62034 pod_ready.go:94] pod "kube-scheduler-embed-certs-059363" is "Ready"
	I1123 08:57:50.786450   62034 pod_ready.go:86] duration metric: took 12.434457ms for pod "kube-scheduler-embed-certs-059363" in "kube-system" namespace to be "Ready" or be gone ...
	I1123 08:57:50.786464   62034 pod_ready.go:40] duration metric: took 5.79400818s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
	I1123 08:57:50.838926   62034 start.go:625] kubectl: 1.34.2, cluster: 1.34.1 (minor skew: 0)
	I1123 08:57:50.918780   62034 out.go:179] * Done! kubectl is now configured to use "embed-certs-059363" cluster and "default" namespace by default
	I1123 08:57:47.146461   62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
	I1123 08:57:49.133800   62386 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:57:49.157740   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:49.330628   62386 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:57:52.066864   62386 ssh_runner.go:235] Completed: sudo systemctl restart docker: (2.736192658s)
	I1123 08:57:52.066973   62386 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:57:52.092926   62386 docker.go:691] Got preloaded images: -- stdout --
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	
	-- /stdout --
	I1123 08:57:52.092950   62386 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:57:52.092962   62386 kubeadm.go:935] updating node { 192.168.39.87 8443 v1.34.1 docker true true} ...
	I1123 08:57:52.093116   62386 kubeadm.go:947] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=newest-cni-078196 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.87
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I1123 08:57:52.093201   62386 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I1123 08:57:52.154769   62386 cni.go:84] Creating CNI manager for ""
	I1123 08:57:52.154816   62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:57:52.154857   62386 kubeadm.go:85] Using pod CIDR: 10.42.0.0/16
	I1123 08:57:52.154889   62386 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.42.0.0/16 AdvertiseAddress:192.168.39.87 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:newest-cni-078196 NodeName:newest-cni-078196 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.87"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.87 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1123 08:57:52.155043   62386 kubeadm.go:196] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.39.87
	  bindPort: 8443
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "newest-cni-078196"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.39.87"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.39.87"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8443
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.1
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.42.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.42.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1123 08:57:52.155124   62386 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
	I1123 08:57:52.170649   62386 binaries.go:51] Found k8s binaries, skipping transfer
	I1123 08:57:52.170739   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1123 08:57:52.186437   62386 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
	I1123 08:57:52.209956   62386 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1123 08:57:52.238732   62386 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2219 bytes)
	I1123 08:57:52.263556   62386 ssh_runner.go:195] Run: grep 192.168.39.87	control-plane.minikube.internal$ /etc/hosts
	I1123 08:57:52.269016   62386 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.39.87	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:57:52.291438   62386 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:57:52.468471   62386 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:57:52.523082   62386 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196 for IP: 192.168.39.87
	I1123 08:57:52.523106   62386 certs.go:195] generating shared ca certs ...
	I1123 08:57:52.523125   62386 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.523320   62386 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
	I1123 08:57:52.523383   62386 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
	I1123 08:57:52.523392   62386 certs.go:257] generating profile certs ...
	I1123 08:57:52.523458   62386 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key
	I1123 08:57:52.523471   62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt with IP's: []
	I1123 08:57:52.657113   62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt ...
	I1123 08:57:52.657156   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.crt: {Name:mkd4a2297a388c5353f24d63692a9eca2de3895a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.657425   62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key ...
	I1123 08:57:52.657447   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/client.key: {Name:mk97d3b4437d9c086044675cf55d01816d40a112 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.657646   62386 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4
	I1123 08:57:52.657673   62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.87]
	I1123 08:57:52.753683   62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 ...
	I1123 08:57:52.753714   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4: {Name:mkbf555d613a4fba5c26a5d85e984e69fa19d66f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.753910   62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 ...
	I1123 08:57:52.753929   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4: {Name:mk86a1d3d78eb2290d7da0f96ec23ec9d83a7382 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.754031   62386 certs.go:382] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt
	I1123 08:57:52.754133   62386 certs.go:386] copying /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key.3441cee4 -> /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key
	I1123 08:57:52.754190   62386 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key
	I1123 08:57:52.754206   62386 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt with IP's: []
	I1123 08:57:52.860620   62386 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt ...
	I1123 08:57:52.860647   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt: {Name:mk8319204c666212061b0efe79d3f0da238ee7e7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.860851   62386 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key ...
	I1123 08:57:52.860877   62386 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key: {Name:mk66bf3abe86bc12c3af12e371d390dfcbb94d6a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:57:52.861117   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
	W1123 08:57:52.861164   62386 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
	I1123 08:57:52.861180   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
	I1123 08:57:52.861225   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
	I1123 08:57:52.861277   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
	I1123 08:57:52.861316   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
	I1123 08:57:52.861376   62386 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:57:52.861976   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1123 08:57:52.899377   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
	I1123 08:57:52.931761   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
	I1123 08:57:52.966281   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
	I1123 08:57:53.007390   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
	I1123 08:57:53.044942   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
	I1123 08:57:53.087195   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
	I1123 08:57:53.132412   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/newest-cni-078196/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
	I1123 08:57:53.183547   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
	I1123 08:57:53.239854   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem --> /usr/share/ca-certificates/22148.pem (1338 bytes)
	I1123 08:57:53.286333   62386 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /usr/share/ca-certificates/221482.pem (1708 bytes)
	I1123 08:57:53.334114   62386 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
	I1123 08:57:53.368550   62386 ssh_runner.go:195] Run: openssl version
	I1123 08:57:53.379200   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/221482.pem && ln -fs /usr/share/ca-certificates/221482.pem /etc/ssl/certs/221482.pem"
	I1123 08:57:53.402310   62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/221482.pem
	I1123 08:57:53.409135   62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 23 08:02 /usr/share/ca-certificates/221482.pem
	I1123 08:57:53.409206   62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/221482.pem
	I1123 08:57:53.420776   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/221482.pem /etc/ssl/certs/3ec20f2e.0"
	I1123 08:57:53.439668   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
	I1123 08:57:53.455152   62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:53.463920   62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 23 07:56 /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:53.463999   62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
	I1123 08:57:53.476317   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
	I1123 08:57:53.500779   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/22148.pem && ln -fs /usr/share/ca-certificates/22148.pem /etc/ssl/certs/22148.pem"
	I1123 08:57:53.518199   62386 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22148.pem
	I1123 08:57:53.524305   62386 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 23 08:02 /usr/share/ca-certificates/22148.pem
	I1123 08:57:53.524381   62386 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22148.pem
	I1123 08:57:53.535728   62386 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/22148.pem /etc/ssl/certs/51391683.0"
	I1123 08:57:53.552096   62386 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
	I1123 08:57:53.560216   62386 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
	stdout:
	
	stderr:
	stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
	I1123 08:57:53.560306   62386 kubeadm.go:401] StartCluster: {Name:newest-cni-078196 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:newest-cni-078196 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:kubeadm Key:pod-network-cidr Value:10.42.0.0/16}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.87 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:false default_sa:true extra:false kubelet:false node_ready:false system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:57:53.560470   62386 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
	I1123 08:57:53.580412   62386 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
	I1123 08:57:53.596570   62386 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
	I1123 08:57:53.611293   62386 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
	I1123 08:57:53.630652   62386 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
	ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
	I1123 08:57:53.630673   62386 kubeadm.go:158] found existing configuration files:
	
	I1123 08:57:53.630721   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
	I1123 08:57:53.648350   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/admin.conf: No such file or directory
	I1123 08:57:53.648419   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
	I1123 08:57:53.668086   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
	I1123 08:57:53.682346   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/kubelet.conf: No such file or directory
	I1123 08:57:53.682427   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
	I1123 08:57:53.696036   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
	I1123 08:57:53.708650   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/controller-manager.conf: No such file or directory
	I1123 08:57:53.708729   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
	I1123 08:57:53.721869   62386 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
	I1123 08:57:53.733930   62386 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
	stdout:
	
	stderr:
	grep: /etc/kubernetes/scheduler.conf: No such file or directory
	I1123 08:57:53.734006   62386 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
	I1123 08:57:53.747563   62386 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml  --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem"
	I1123 08:57:53.803699   62386 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1
	I1123 08:57:53.803788   62386 kubeadm.go:319] [preflight] Running pre-flight checks
	I1123 08:57:53.933708   62386 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
	I1123 08:57:53.933907   62386 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
	I1123 08:57:53.934039   62386 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
	I1123 08:57:53.957595   62386 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
	I1123 08:57:53.960282   62386 out.go:252]   - Generating certificates and keys ...
	I1123 08:57:53.960381   62386 kubeadm.go:319] [certs] Using existing ca certificate authority
	I1123 08:57:53.960461   62386 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
	I1123 08:57:53.226464   62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: no route to host
	I1123 08:57:54.308839   62386 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
	I1123 08:57:54.462473   62386 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
	I1123 08:57:54.656673   62386 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
	I1123 08:57:55.051656   62386 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
	I1123 08:57:55.893313   62386 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
	I1123 08:57:55.893649   62386 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
	I1123 08:57:56.010218   62386 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
	I1123 08:57:56.010458   62386 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost newest-cni-078196] and IPs [192.168.39.87 127.0.0.1 ::1]
	I1123 08:57:56.117087   62386 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
	I1123 08:57:56.436611   62386 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
	I1123 08:57:56.745597   62386 kubeadm.go:319] [certs] Generating "sa" key and public key
	I1123 08:57:56.745835   62386 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
	I1123 08:57:56.988789   62386 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
	I1123 08:57:57.476516   62386 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
	I1123 08:57:57.662890   62386 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
	I1123 08:57:58.001771   62386 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
	I1123 08:57:58.199479   62386 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
	I1123 08:57:58.201506   62386 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
	I1123 08:57:58.204309   62386 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
	I1123 08:57:58.206280   62386 out.go:252]   - Booting up control plane ...
	I1123 08:57:58.206413   62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
	I1123 08:57:58.206524   62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
	I1123 08:57:58.206622   62386 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
	I1123 08:57:58.225366   62386 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
	I1123 08:57:58.225656   62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
	I1123 08:57:58.233945   62386 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
	I1123 08:57:58.234118   62386 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
	I1123 08:57:58.234179   62386 kubeadm.go:319] [kubelet-start] Starting the kubelet
	I1123 08:57:58.435406   62386 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
	I1123 08:57:58.435734   62386 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
	I1123 08:57:57.259625   62480 main.go:143] libmachine: Error dialing TCP: dial tcp 192.168.83.137:22: connect: connection refused
	I1123 08:58:00.375540   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:58:00.379895   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.380474   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.380511   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.380795   62480 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/config.json ...
	I1123 08:58:00.381087   62480 machine.go:94] provisionDockerMachine start ...
	I1123 08:58:00.384347   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.384859   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.384898   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.385108   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:00.385436   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:00.385456   62480 main.go:143] libmachine: About to run SSH command:
	hostname
	I1123 08:58:00.505124   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: minikube
	
	I1123 08:58:00.505170   62480 buildroot.go:166] provisioning hostname "default-k8s-diff-port-925051"
	I1123 08:58:00.509221   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.509702   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.509735   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.509925   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:00.510144   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:00.510161   62480 main.go:143] libmachine: About to run SSH command:
	sudo hostname default-k8s-diff-port-925051 && echo "default-k8s-diff-port-925051" | sudo tee /etc/hostname
	I1123 08:58:00.644600   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: default-k8s-diff-port-925051
	
	I1123 08:58:00.648066   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.648604   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.648630   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.648845   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:00.649045   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:00.649060   62480 main.go:143] libmachine: About to run SSH command:
	
			if ! grep -xq '.*\sdefault-k8s-diff-port-925051' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 default-k8s-diff-port-925051/g' /etc/hosts;
				else 
					echo '127.0.1.1 default-k8s-diff-port-925051' | sudo tee -a /etc/hosts; 
				fi
			fi
	I1123 08:58:00.768996   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 
	I1123 08:58:00.769030   62480 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/21966-18241/.minikube CaCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21966-18241/.minikube}
	I1123 08:58:00.769067   62480 buildroot.go:174] setting up certificates
	I1123 08:58:00.769088   62480 provision.go:84] configureAuth start
	I1123 08:58:00.772355   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.772869   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.772909   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.775615   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.776035   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.776086   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.776228   62480 provision.go:143] copyHostCerts
	I1123 08:58:00.776306   62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem, removing ...
	I1123 08:58:00.776319   62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem
	I1123 08:58:00.776391   62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/ca.pem (1082 bytes)
	I1123 08:58:00.776518   62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem, removing ...
	I1123 08:58:00.776529   62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem
	I1123 08:58:00.776558   62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/cert.pem (1123 bytes)
	I1123 08:58:00.776642   62480 exec_runner.go:144] found /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem, removing ...
	I1123 08:58:00.776653   62480 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem
	I1123 08:58:00.776678   62480 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21966-18241/.minikube/key.pem (1675 bytes)
	I1123 08:58:00.776751   62480 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem org=jenkins.default-k8s-diff-port-925051 san=[127.0.0.1 192.168.83.137 default-k8s-diff-port-925051 localhost minikube]
	I1123 08:58:00.949651   62480 provision.go:177] copyRemoteCerts
	I1123 08:58:00.949711   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
	I1123 08:58:00.952558   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.952960   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:00.952982   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:00.953136   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:01.044089   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
	I1123 08:58:01.077898   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
	I1123 08:58:01.115919   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/machines/server.pem --> /etc/docker/server.pem (1249 bytes)
	I1123 08:58:01.157254   62480 provision.go:87] duration metric: took 388.131412ms to configureAuth
	I1123 08:58:01.157285   62480 buildroot.go:189] setting minikube options for container-runtime
	I1123 08:58:01.157510   62480 config.go:182] Loaded profile config "default-k8s-diff-port-925051": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:58:01.160663   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.161248   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:01.161295   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.161496   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:01.161777   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:01.161792   62480 main.go:143] libmachine: About to run SSH command:
	df --output=fstype / | tail -n 1
	I1123 08:58:01.278322   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: tmpfs
	
	I1123 08:58:01.278347   62480 buildroot.go:70] root file system type: tmpfs
	I1123 08:58:01.278524   62480 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
	I1123 08:58:01.281592   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.282050   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:01.282098   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.282395   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:01.282601   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:01.282650   62480 main.go:143] libmachine: About to run SSH command:
	sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
		-H fd:// --containerd=/run/containerd/containerd.sock \
		-H unix:///var/run/docker.sock \
		--default-ulimit=nofile=1048576:1048576 \
		--tlsverify \
		--tlscacert /etc/docker/ca.pem \
		--tlscert /etc/docker/server.pem \
		--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP \$MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	" | sudo tee /lib/systemd/system/docker.service.new
	I1123 08:58:01.426254   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: [Unit]
	Description=Docker Application Container Engine
	Documentation=https://docs.docker.com
	After=network-online.target minikube-automount.service nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
	Wants=network-online.target containerd.service
	Requires=docker.socket
	StartLimitBurst=3
	StartLimitIntervalSec=60
	[Service]
	Type=notify
	Restart=always
	
	
	
	ExecStart=
	ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 	-H fd:// --containerd=/run/containerd/containerd.sock 	-H unix:///var/run/docker.sock 	--default-ulimit=nofile=1048576:1048576 	--tlsverify 	--tlscacert /etc/docker/ca.pem 	--tlscert /etc/docker/server.pem 	--tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12 
	ExecReload=/bin/kill -s HUP $MAINPID
	
	# Having non-zero Limit*s causes performance problems due to accounting overhead
	# in the kernel. We recommend using cgroups to do container-local accounting.
	LimitNOFILE=infinity
	LimitNPROC=infinity
	LimitCORE=infinity
	
	# Uncomment TasksMax if your systemd version supports it.
	# Only systemd 226 and above support this version.
	TasksMax=infinity
	
	# set delegate yes so that systemd does not reset the cgroups of docker containers
	Delegate=yes
	
	# kill only the docker process, not all processes in the cgroup
	KillMode=process
	OOMScoreAdjust=-500
	
	[Install]
	WantedBy=multi-user.target
	
	I1123 08:58:01.429123   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.429531   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:01.429561   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:01.429727   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:01.429945   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:01.429968   62480 main.go:143] libmachine: About to run SSH command:
	sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
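	The command above is minikube's idempotent unit update: docker.service.new is written over SSH, diffed against the installed unit, and only moved into place (followed by daemon-reload, enable, and restart) when the two differ or the unit does not exist yet. A minimal shell sketch of that pattern, reusing the paths from the log (the standalone script is illustrative only, not part of the test run):

	# Sketch of the update-if-changed pattern shown above (illustrative only).
	NEW=/lib/systemd/system/docker.service.new
	CUR=/lib/systemd/system/docker.service
	if ! sudo diff -u "$CUR" "$NEW"; then
	        sudo mv "$NEW" "$CUR"
	        sudo systemctl -f daemon-reload
	        sudo systemctl -f enable docker
	        sudo systemctl -f restart docker
	fi

	On this run the diff fails with "can't stat '/lib/systemd/system/docker.service'" (see the SSH output below), so the new unit is installed and Docker is enabled and restarted.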
	I1123 08:57:59.438296   62386 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 1.003129845s
	I1123 08:57:59.442059   62386 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
	I1123 08:57:59.442209   62386 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.39.87:8443/livez
	I1123 08:57:59.442348   62386 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
	I1123 08:57:59.442479   62386 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
	I1123 08:58:01.938904   62386 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 2.497307336s
	I1123 08:58:03.405770   62386 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 3.965160338s
	I1123 08:58:05.442827   62386 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 6.002687393s
	I1123 08:58:05.466318   62386 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
	I1123 08:58:05.495033   62386 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
	I1123 08:58:05.522725   62386 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs
	I1123 08:58:05.523012   62386 kubeadm.go:319] [mark-control-plane] Marking the node newest-cni-078196 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
	I1123 08:58:05.543260   62386 kubeadm.go:319] [bootstrap-token] Using token: dgrodg.6ciokz1biodl2yci
	I1123 08:58:02.622394   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
	Created symlink '/etc/systemd/system/multi-user.target.wants/docker.service' → '/usr/lib/systemd/system/docker.service'.
	
	I1123 08:58:02.622428   62480 machine.go:97] duration metric: took 2.24132298s to provisionDockerMachine
	I1123 08:58:02.622443   62480 start.go:293] postStartSetup for "default-k8s-diff-port-925051" (driver="kvm2")
	I1123 08:58:02.622457   62480 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
	I1123 08:58:02.622522   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
	I1123 08:58:02.625753   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.626334   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.626374   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.626567   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:02.732392   62480 ssh_runner.go:195] Run: cat /etc/os-release
	I1123 08:58:02.737975   62480 info.go:137] Remote host: Buildroot 2025.02
	I1123 08:58:02.738010   62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/addons for local assets ...
	I1123 08:58:02.738111   62480 filesync.go:126] Scanning /home/jenkins/minikube-integration/21966-18241/.minikube/files for local assets ...
	I1123 08:58:02.738225   62480 filesync.go:149] local asset: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem -> 221482.pem in /etc/ssl/certs
	I1123 08:58:02.738341   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
	I1123 08:58:02.755815   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem --> /etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:58:02.790325   62480 start.go:296] duration metric: took 167.864143ms for postStartSetup
	I1123 08:58:02.790381   62480 fix.go:56] duration metric: took 20.323185295s for fixHost
	I1123 08:58:02.793471   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.793912   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.793950   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.794223   62480 main.go:143] libmachine: Using SSH client type: native
	I1123 08:58:02.794447   62480 main.go:143] libmachine: &{{{<nil> 0 [] [] []} docker [0x84d1c0] 0x84fe60 <nil>  [] 0s} 192.168.83.137 22 <nil> <nil>}
	I1123 08:58:02.794458   62480 main.go:143] libmachine: About to run SSH command:
	date +%s.%N
	I1123 08:58:02.907310   62480 main.go:143] libmachine: SSH cmd err, output: <nil>: 1763888282.872914256
	
	I1123 08:58:02.907338   62480 fix.go:216] guest clock: 1763888282.872914256
	I1123 08:58:02.907348   62480 fix.go:229] Guest: 2025-11-23 08:58:02.872914256 +0000 UTC Remote: 2025-11-23 08:58:02.790385341 +0000 UTC m=+45.999028572 (delta=82.528915ms)
	I1123 08:58:02.907369   62480 fix.go:200] guest clock delta is within tolerance: 82.528915ms
	I1123 08:58:02.907375   62480 start.go:83] releasing machines lock for "default-k8s-diff-port-925051", held for 20.440202624s
	I1123 08:58:02.910604   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.911104   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.911130   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.911758   62480 ssh_runner.go:195] Run: cat /version.json
	I1123 08:58:02.911816   62480 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
	I1123 08:58:02.915121   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915430   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915677   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.915710   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915907   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:02.915942   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:02.915932   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:02.916129   62480 sshutil.go:53] new ssh client: &{IP:192.168.83.137 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/default-k8s-diff-port-925051/id_rsa Username:docker}
	I1123 08:58:03.020815   62480 ssh_runner.go:195] Run: systemctl --version
	I1123 08:58:03.028066   62480 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
	W1123 08:58:03.036089   62480 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
	I1123 08:58:03.036168   62480 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
	I1123 08:58:03.059461   62480 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
	I1123 08:58:03.059497   62480 start.go:496] detecting cgroup driver to use...
	I1123 08:58:03.059639   62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:58:03.085945   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
	I1123 08:58:03.100188   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
	I1123 08:58:03.114121   62480 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
	I1123 08:58:03.114197   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
	I1123 08:58:03.128502   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:58:03.141941   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
	I1123 08:58:03.155742   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
	I1123 08:58:03.170251   62480 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
	I1123 08:58:03.185473   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
	I1123 08:58:03.199212   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
	I1123 08:58:03.212441   62480 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1  enable_unprivileged_ports = true|' /etc/containerd/config.toml"
	I1123 08:58:03.225457   62480 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
	I1123 08:58:03.237735   62480 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 1
	stdout:
	
	stderr:
	sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
	I1123 08:58:03.237807   62480 ssh_runner.go:195] Run: sudo modprobe br_netfilter
	I1123 08:58:03.251616   62480 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
	I1123 08:58:03.264293   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:03.431052   62480 ssh_runner.go:195] Run: sudo systemctl restart containerd
	I1123 08:58:03.484769   62480 start.go:496] detecting cgroup driver to use...
	I1123 08:58:03.484887   62480 ssh_runner.go:195] Run: sudo systemctl cat docker.service
	I1123 08:58:03.515067   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:58:03.538674   62480 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
	I1123 08:58:03.566269   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
	I1123 08:58:03.585483   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:58:03.603778   62480 ssh_runner.go:195] Run: sudo systemctl stop -f crio
	I1123 08:58:03.640497   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
	I1123 08:58:03.659085   62480 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
	" | sudo tee /etc/crictl.yaml"
	I1123 08:58:03.687162   62480 ssh_runner.go:195] Run: which cri-dockerd
	I1123 08:58:03.694175   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
	I1123 08:58:03.712519   62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
	I1123 08:58:03.741521   62480 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
	I1123 08:58:03.916394   62480 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
	I1123 08:58:04.069031   62480 docker.go:575] configuring docker to use "cgroupfs" as cgroup driver...
	I1123 08:58:04.069190   62480 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
	I1123 08:58:04.093301   62480 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
	I1123 08:58:04.109417   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:04.272454   62480 ssh_runner.go:195] Run: sudo systemctl restart docker
	I1123 08:58:04.931701   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
	I1123 08:58:04.948944   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
	I1123 08:58:04.971544   62480 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
	I1123 08:58:05.005474   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:58:05.031097   62480 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
	I1123 08:58:05.200507   62480 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
	I1123 08:58:05.394816   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:05.619873   62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
	I1123 08:58:05.666855   62480 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
	I1123 08:58:05.685142   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:05.848671   62480 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
	I1123 08:58:05.996045   62480 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
	I1123 08:58:06.018056   62480 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock
	I1123 08:58:06.018168   62480 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
	I1123 08:58:06.026546   62480 start.go:564] Will wait 60s for crictl version
	I1123 08:58:06.026630   62480 ssh_runner.go:195] Run: which crictl
	I1123 08:58:06.032819   62480 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
	I1123 08:58:06.084168   62480 start.go:580] Version:  0.1.0
	RuntimeName:  docker
	RuntimeVersion:  28.5.1
	RuntimeApiVersion:  v1
	I1123 08:58:06.084266   62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:58:06.126882   62480 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
	I1123 08:58:06.163943   62480 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ...
	I1123 08:58:06.168664   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:06.169284   62480 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:19:c7:db", ip: ""} in network mk-default-k8s-diff-port-925051: {Iface:virbr5 ExpiryTime:2025-11-23 09:57:56 +0000 UTC Type:0 Mac:52:54:00:19:c7:db Iaid: IPaddr:192.168.83.137 Prefix:24 Hostname:default-k8s-diff-port-925051 Clientid:01:52:54:00:19:c7:db}
	I1123 08:58:06.169324   62480 main.go:143] libmachine: domain default-k8s-diff-port-925051 has defined IP address 192.168.83.137 and MAC address 52:54:00:19:c7:db in network mk-default-k8s-diff-port-925051
	I1123 08:58:06.169553   62480 ssh_runner.go:195] Run: grep 192.168.83.1	host.minikube.internal$ /etc/hosts
	I1123 08:58:06.176801   62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.83.1	host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:58:06.201834   62480 kubeadm.go:884] updating cluster {Name:default-k8s-diff-port-925051 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:3072 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8444 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.83.137 Port:8444 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[dashboard:true default-storageclass:true metrics-server:true storage-provisioner:true] CustomAddonImages:map[MetricsScraper:registry.k8s.io/echoserver:1.4 MetricsServer:registry.k8s.io/echoserver:1.4] CustomAddonRegistries:map[MetricsServer:fake.domain] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
	I1123 08:58:06.201979   62480 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
	I1123 08:58:06.202051   62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:58:06.228393   62480 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:58:06.228418   62480 docker.go:621] Images already preloaded, skipping extraction
	I1123 08:58:06.228478   62480 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
	I1123 08:58:06.253832   62480 docker.go:691] Got preloaded images: -- stdout --
	gcr.io/k8s-minikube/gvisor-addon:2
	registry.k8s.io/kube-controller-manager:v1.34.1
	registry.k8s.io/kube-scheduler:v1.34.1
	registry.k8s.io/kube-apiserver:v1.34.1
	registry.k8s.io/kube-proxy:v1.34.1
	registry.k8s.io/etcd:3.6.4-0
	registry.k8s.io/pause:3.10.1
	registry.k8s.io/coredns/coredns:v1.12.1
	gcr.io/k8s-minikube/storage-provisioner:v5
	gcr.io/k8s-minikube/busybox:1.28.4-glibc
	
	-- /stdout --
	I1123 08:58:06.253872   62480 cache_images.go:86] Images are preloaded, skipping loading
	I1123 08:58:06.253886   62480 kubeadm.go:935] updating node { 192.168.83.137 8444 v1.34.1 docker true true} ...
	I1123 08:58:06.254046   62480 kubeadm.go:947] kubelet [Unit]
	Wants=docker.socket
	
	[Service]
	ExecStart=
	ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=default-k8s-diff-port-925051 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.83.137
	
	[Install]
	 config:
	{KubernetesVersion:v1.34.1 ClusterName:default-k8s-diff-port-925051 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
	I1123 08:58:06.254117   62480 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
	I1123 08:58:06.333361   62480 cni.go:84] Creating CNI manager for ""
	I1123 08:58:06.333408   62480 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:58:06.333432   62480 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
	I1123 08:58:06.333457   62480 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.83.137 APIServerPort:8444 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:default-k8s-diff-port-925051 NodeName:default-k8s-diff-port-925051 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.83.137"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.83.137 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
	I1123 08:58:06.333702   62480 kubeadm.go:196] kubeadm config:
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: InitConfiguration
	localAPIEndpoint:
	  advertiseAddress: 192.168.83.137
	  bindPort: 8444
	bootstrapTokens:
	  - groups:
	      - system:bootstrappers:kubeadm:default-node-token
	    ttl: 24h0m0s
	    usages:
	      - signing
	      - authentication
	nodeRegistration:
	  criSocket: unix:///var/run/cri-dockerd.sock
	  name: "default-k8s-diff-port-925051"
	  kubeletExtraArgs:
	    - name: "node-ip"
	      value: "192.168.83.137"
	  taints: []
	---
	apiVersion: kubeadm.k8s.io/v1beta4
	kind: ClusterConfiguration
	apiServer:
	  certSANs: ["127.0.0.1", "localhost", "192.168.83.137"]
	  extraArgs:
	    - name: "enable-admission-plugins"
	      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
	controllerManager:
	  extraArgs:
	    - name: "allocate-node-cidrs"
	      value: "true"
	    - name: "leader-elect"
	      value: "false"
	scheduler:
	  extraArgs:
	    - name: "leader-elect"
	      value: "false"
	certificatesDir: /var/lib/minikube/certs
	clusterName: mk
	controlPlaneEndpoint: control-plane.minikube.internal:8444
	etcd:
	  local:
	    dataDir: /var/lib/minikube/etcd
	kubernetesVersion: v1.34.1
	networking:
	  dnsDomain: cluster.local
	  podSubnet: "10.244.0.0/16"
	  serviceSubnet: 10.96.0.0/12
	---
	apiVersion: kubelet.config.k8s.io/v1beta1
	kind: KubeletConfiguration
	authentication:
	  x509:
	    clientCAFile: /var/lib/minikube/certs/ca.crt
	cgroupDriver: cgroupfs
	containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
	hairpinMode: hairpin-veth
	runtimeRequestTimeout: 15m
	clusterDomain: "cluster.local"
	# disable disk resource management by default
	imageGCHighThresholdPercent: 100
	evictionHard:
	  nodefs.available: "0%"
	  nodefs.inodesFree: "0%"
	  imagefs.available: "0%"
	failSwapOn: false
	staticPodPath: /etc/kubernetes/manifests
	---
	apiVersion: kubeproxy.config.k8s.io/v1alpha1
	kind: KubeProxyConfiguration
	clusterCIDR: "10.244.0.0/16"
	metricsBindAddress: 0.0.0.0:10249
	conntrack:
	  maxPerCore: 0
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
	  tcpEstablishedTimeout: 0s
	# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
	  tcpCloseWaitTimeout: 0s
	
	I1123 08:58:06.333784   62480 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1
	I1123 08:58:06.356565   62480 binaries.go:51] Found k8s binaries, skipping transfer
	I1123 08:58:06.356666   62480 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
	I1123 08:58:06.376736   62480 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (329 bytes)
	I1123 08:58:06.412797   62480 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
	I1123 08:58:06.447785   62480 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2235 bytes)
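	The kubeadm.yaml.new staged above carries the multi-document config rendered earlier in the log: InitConfiguration (node registration, bootstrap token, bind port 8444), ClusterConfiguration (certSANs, control-plane endpoint, pod and service subnets), KubeletConfiguration, and KubeProxyConfiguration, separated by --- markers. A quick, purely illustrative check (not part of the test run) to confirm all four documents made it into the staged file:

	# Illustrative check only: list the document kinds in the staged kubeadm config.
	grep '^kind:' /var/tmp/minikube/kubeadm.yaml.new
	# Expected output:
	# kind: InitConfiguration
	# kind: ClusterConfiguration
	# kind: KubeletConfiguration
	# kind: KubeProxyConfiguration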
	I1123 08:58:06.486793   62480 ssh_runner.go:195] Run: grep 192.168.83.137	control-plane.minikube.internal$ /etc/hosts
	I1123 08:58:06.494943   62480 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.83.137	control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
	I1123 08:58:06.522673   62480 ssh_runner.go:195] Run: sudo systemctl daemon-reload
	I1123 08:58:06.760714   62480 ssh_runner.go:195] Run: sudo systemctl start kubelet
	I1123 08:58:06.816865   62480 certs.go:69] Setting up /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051 for IP: 192.168.83.137
	I1123 08:58:06.817014   62480 certs.go:195] generating shared ca certs ...
	I1123 08:58:06.817069   62480 certs.go:227] acquiring lock for ca certs: {Name:mk4438f2b659811ea2f01e009d28f1b857a5024c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
	I1123 08:58:06.817298   62480 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/ca.key
	I1123 08:58:06.817470   62480 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21966-18241/.minikube/proxy-client-ca.key
	I1123 08:58:06.817524   62480 certs.go:257] generating profile certs ...
	I1123 08:58:06.817689   62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/client.key
	I1123 08:58:06.817768   62480 certs.go:360] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/apiserver.key.3e63079d
	I1123 08:58:06.817847   62480 certs.go:360] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/default-k8s-diff-port-925051/proxy-client.key
	I1123 08:58:06.818039   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148.pem (1338 bytes)
	W1123 08:58:06.818089   62480 certs.go:480] ignoring /home/jenkins/minikube-integration/21966-18241/.minikube/certs/22148_empty.pem, impossibly tiny 0 bytes
	I1123 08:58:06.818100   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca-key.pem (1675 bytes)
	I1123 08:58:06.818136   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/ca.pem (1082 bytes)
	I1123 08:58:06.818179   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/cert.pem (1123 bytes)
	I1123 08:58:06.818209   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/certs/key.pem (1675 bytes)
	I1123 08:58:06.818301   62480 certs.go:484] found cert: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/ssl/certs/221482.pem (1708 bytes)
	I1123 08:58:06.819187   62480 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21966-18241/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
	I1123 08:58:05.545959   62386 out.go:252]   - Configuring RBAC rules ...
	I1123 08:58:05.546132   62386 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
	I1123 08:58:05.554804   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
	I1123 08:58:05.569723   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
	I1123 08:58:05.574634   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
	I1123 08:58:05.579213   62386 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
	I1123 08:58:05.585176   62386 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
	I1123 08:58:05.855390   62386 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
	I1123 08:58:06.305498   62386 kubeadm.go:319] [addons] Applied essential addon: CoreDNS
	I1123 08:58:06.860572   62386 kubeadm.go:319] [addons] Applied essential addon: kube-proxy
	I1123 08:58:06.862132   62386 kubeadm.go:319] 
	I1123 08:58:06.862300   62386 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully!
	I1123 08:58:06.862315   62386 kubeadm.go:319] 
	I1123 08:58:06.862459   62386 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user:
	I1123 08:58:06.862488   62386 kubeadm.go:319] 
	I1123 08:58:06.862544   62386 kubeadm.go:319]   mkdir -p $HOME/.kube
	I1123 08:58:06.862628   62386 kubeadm.go:319]   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	I1123 08:58:06.862700   62386 kubeadm.go:319]   sudo chown $(id -u):$(id -g) $HOME/.kube/config
	I1123 08:58:06.862710   62386 kubeadm.go:319] 
	I1123 08:58:06.862788   62386 kubeadm.go:319] Alternatively, if you are the root user, you can run:
	I1123 08:58:06.862797   62386 kubeadm.go:319] 
	I1123 08:58:06.862866   62386 kubeadm.go:319]   export KUBECONFIG=/etc/kubernetes/admin.conf
	I1123 08:58:06.862875   62386 kubeadm.go:319] 
	I1123 08:58:06.862984   62386 kubeadm.go:319] You should now deploy a pod network to the cluster.
	I1123 08:58:06.863098   62386 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
	I1123 08:58:06.863220   62386 kubeadm.go:319]   https://kubernetes.io/docs/concepts/cluster-administration/addons/
	I1123 08:58:06.863243   62386 kubeadm.go:319] 
	I1123 08:58:06.863353   62386 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities
	I1123 08:58:06.863463   62386 kubeadm.go:319] and service account keys on each node and then running the following as root:
	I1123 08:58:06.863473   62386 kubeadm.go:319] 
	I1123 08:58:06.863589   62386 kubeadm.go:319]   kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
	I1123 08:58:06.863736   62386 kubeadm.go:319] 	--discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a \
	I1123 08:58:06.863769   62386 kubeadm.go:319] 	--control-plane 
	I1123 08:58:06.863778   62386 kubeadm.go:319] 
	I1123 08:58:06.863904   62386 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root:
	I1123 08:58:06.863913   62386 kubeadm.go:319] 
	I1123 08:58:06.864056   62386 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token dgrodg.6ciokz1biodl2yci \
	I1123 08:58:06.864229   62386 kubeadm.go:319] 	--discovery-token-ca-cert-hash sha256:4395c5eefb8e424e96dd1759797a1c8f0fafb8cddc9a1a46a496a26ff5b9685a 
	I1123 08:58:06.865336   62386 kubeadm.go:319] 	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
	I1123 08:58:06.865367   62386 cni.go:84] Creating CNI manager for ""
	I1123 08:58:06.865396   62386 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 08:58:06.867294   62386 out.go:179] * Configuring bridge CNI (Container Networking Interface) ...
	I1123 08:58:06.868866   62386 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
	I1123 08:58:06.887652   62386 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
	I1123 08:58:06.925093   62386 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
	I1123 08:58:06.925265   62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
	I1123 08:58:06.925355   62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes newest-cni-078196 minikube.k8s.io/updated_at=2025_11_23T08_58_06_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e minikube.k8s.io/name=newest-cni-078196 minikube.k8s.io/primary=true
	I1123 08:58:07.139216   62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1123 08:58:07.139367   62386 ops.go:34] apiserver oom_adj: -16
	I1123 08:58:07.639626   62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1123 08:58:08.140356   62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	I1123 08:58:08.639822   62386 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
	
	
	==> Docker <==
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.403847294Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530278754Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.530380987Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Nov 23 08:57:20 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:20Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541222738Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.541313635Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544639412Z" level=error msg="unexpected HTTP error handling" error="<nil>"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.544665809Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:57:20 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:20.895558802Z" level=info msg="ignoring event" container=5858e2fd1e0f544e020a845d1e9aa15e86c2117c0ebff9dfe1b6f4d96f844434 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Nov 23 08:57:21 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/f70de02d77443d2041cfe03c25cb36b6f758dd4e678353419ea55ac106e8b68a/resolv.conf as [nameserver 10.96.0.10 search kubernetes-dashboard.svc.cluster.local svc.cluster.local cluster.local options ndots:5]"
	Nov 23 08:57:32 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:32.990740143Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076597693Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.076828182Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Nov 23 08:57:33 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:57:33Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
	Nov 23 08:57:33 no-preload-019660 dockerd[1171]: time="2025-11-23T08:57:33.550212350Z" level=info msg="ignoring event" container=1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
	Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
	Nov 23 08:58:07 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:07Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-lp6jk_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"9a49ebff42d5eef5c3e23db2e1ab337396080dea6c13220062ba5e0e48a95cc8\""
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.760065184Z" level=warning msg="reference for unknown type: application/vnd.docker.distribution.manifest.v1+prettyjws" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863488316Z" level=warning msg="Error persisting manifest" digest="sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb" error="error committing manifest to content store: commit failed: unexpected commit digest sha256:eaee4c452b076cdb05b391ed7e75e1ad0aca136665875ab5d7e2f3d9f4675769, expected sha256:5d99aa1120524c801bc8c1a7077e8f5ec122ba16b6dda1a5d3826057f67b9bcb: failed precondition" remote="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.863610785Z" level=info msg="Attempting next endpoint for pull after error: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/"
	Nov 23 08:58:08 no-preload-019660 cri-dockerd[1543]: time="2025-11-23T08:58:08Z" level=info msg="Stop pulling image registry.k8s.io/echoserver:1.4: 1.4: Pulling from echoserver"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.897944813Z" level=warning msg="Error getting v2 registry: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.899313923Z" level=info msg="Attempting next endpoint for pull after error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914470304Z" level=error msg="unexpected HTTP error handling" error="<nil>"
	Nov 23 08:58:08 no-preload-019660 dockerd[1171]: time="2025-11-23T08:58:08.914503647Z" level=error msg="Handler for POST /v1.46/images/create returned error: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host"
	
	
	==> container status <==
	CONTAINER           IMAGE                                                                                                 CREATED              STATE               NAME                      ATTEMPT             POD ID              POD                                         NAMESPACE
	371de4a468901       6e38f40d628db                                                                                         3 seconds ago        Running             storage-provisioner       2                   a97e7e7100c3a       storage-provisioner                         kube-system
	57ebcdb97431d       kubernetesui/dashboard@sha256:2e500d29e9d5f4a086b908eb8dfe7ecac57d2ab09d65b24f588b1d449841ef93        51 seconds ago       Running             kubernetes-dashboard      0                   644b3c0a17fe8       kubernetes-dashboard-855c9754f9-zh9mv       kubernetes-dashboard
	58768e42678e9       56cc512116c8f                                                                                         53 seconds ago       Running             busybox                   1                   c39a5f42630b0       busybox                                     default
	f7e183883855c       52546a367cc9e                                                                                         53 seconds ago       Running             coredns                   1                   86281d14c8f1e       coredns-66bc5c9577-nj6pk                    kube-system
	1f0a2f0aefa9b       6e38f40d628db                                                                                         About a minute ago   Exited              storage-provisioner       1                   a97e7e7100c3a       storage-provisioner                         kube-system
	8c0537e27a6fb       fc25172553d79                                                                                         About a minute ago   Running             kube-proxy                1                   dd983c999b8f4       kube-proxy-wlb9w                            kube-system
	8deb34aee6ea1       5f1f5298c888d                                                                                         About a minute ago   Running             etcd                      1                   ccce046e98c9b       etcd-no-preload-019660                      kube-system
	1a4750ff7e8cb       c80c8dbafe7dd                                                                                         About a minute ago   Running             kube-controller-manager   1                   e18e6fb700516       kube-controller-manager-no-preload-019660   kube-system
	6929fc4394d1d       c3994bc696102                                                                                         About a minute ago   Running             kube-apiserver            1                   b493d9303993d       kube-apiserver-no-preload-019660            kube-system
	266be5a40ca65       7dd6aaa1717ab                                                                                         About a minute ago   Running             kube-scheduler            1                   a1f3f18719102       kube-scheduler-no-preload-019660            kube-system
	7e459e5ac3043       gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e   2 minutes ago        Exited              busybox                   0                   c0e79a536f316       busybox                                     default
	b5d2ec6064039       52546a367cc9e                                                                                         2 minutes ago        Exited              coredns                   0                   92a72987832f3       coredns-66bc5c9577-nj6pk                    kube-system
	4aea324009fdd       fc25172553d79                                                                                         2 minutes ago        Exited              kube-proxy                0                   adcf7215f30c5       kube-proxy-wlb9w                            kube-system
	57bb06d26ab69       7dd6aaa1717ab                                                                                         2 minutes ago        Exited              kube-scheduler            0                   0e3f3ba5c2b8c       kube-scheduler-no-preload-019660            kube-system
	78433f5a1dee5       5f1f5298c888d                                                                                         2 minutes ago        Exited              etcd                      0                   c90dfb42b9b72       etcd-no-preload-019660                      kube-system
	e0963762dabe6       c80c8dbafe7dd                                                                                         2 minutes ago        Exited              kube-controller-manager   0                   796e38a439eca       kube-controller-manager-no-preload-019660   kube-system
	51985d9c2b5e4       c3994bc696102                                                                                         2 minutes ago        Exited              kube-apiserver            0                   8ec1927039422       kube-apiserver-no-preload-019660            kube-system
	
	
	==> coredns [b5d2ec606403] <==
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
	[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b
	CoreDNS-1.12.1
	linux/amd64, go1.24.1, 707c7c1
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/ready: Still waiting on: "kubernetes"
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	[ERROR] plugin/kubernetes: Unhandled Error
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	[ERROR] plugin/kubernetes: Unhandled Error
	[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
	[ERROR] plugin/kubernetes: Unhandled Error
	[INFO] Reloading
	[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
	[INFO] Reloading complete
	[INFO] 127.0.0.1:42110 - 29445 "HINFO IN 9017480915883545082.4400091200596631812. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.103448715s
	[INFO] SIGTERM: Shutting down servers then terminating
	[INFO] plugin/health: Going into lameduck mode for 5s
	
	
	==> coredns [f7e183883855] <==
	maxprocs: Leaving GOMAXPROCS=2: CPU quota undefined
	.:53
	[INFO] plugin/reload: Running configuration SHA512 = 6e77f21cd6946b87ec86c565e2060aa5d23c02882cb22fd7a321b5e8cd0c8bdafe21968fcff406405707b988b753da21ecd190fe02329f1b569bfa74920cc0fa
	CoreDNS-1.12.1
	linux/amd64, go1.24.1, 707c7c1
	[INFO] 127.0.0.1:55083 - 4317 "HINFO IN 4704850718228764652.4547352497864188913. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.118220473s
	
	
	==> describe nodes <==
	Name:               no-preload-019660
	Roles:              control-plane
	Labels:             beta.kubernetes.io/arch=amd64
	                    beta.kubernetes.io/os=linux
	                    kubernetes.io/arch=amd64
	                    kubernetes.io/hostname=no-preload-019660
	                    kubernetes.io/os=linux
	                    minikube.k8s.io/commit=3e219827a5f064cf736992b79e59864301ece66e
	                    minikube.k8s.io/name=no-preload-019660
	                    minikube.k8s.io/primary=true
	                    minikube.k8s.io/updated_at=2025_11_23T08_55_22_0700
	                    minikube.k8s.io/version=v1.37.0
	                    node-role.kubernetes.io/control-plane=
	                    node.kubernetes.io/exclude-from-external-load-balancers=
	Annotations:        node.alpha.kubernetes.io/ttl: 0
	                    volumes.kubernetes.io/controller-managed-attach-detach: true
	CreationTimestamp:  Sun, 23 Nov 2025 08:55:18 +0000
	Taints:             <none>
	Unschedulable:      false
	Lease:
	  HolderIdentity:  no-preload-019660
	  AcquireTime:     <unset>
	  RenewTime:       Sun, 23 Nov 2025 08:58:07 +0000
	Conditions:
	  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
	  ----             ------  -----------------                 ------------------                ------                       -------
	  MemoryPressure   False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
	  DiskPressure     False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
	  PIDPressure      False   Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:55:14 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
	  Ready            True    Sun, 23 Nov 2025 08:58:07 +0000   Sun, 23 Nov 2025 08:57:11 +0000   KubeletReady                 kubelet is posting ready status
	Addresses:
	  InternalIP:  192.168.50.40
	  Hostname:    no-preload-019660
	Capacity:
	  cpu:                2
	  ephemeral-storage:  17734596Ki
	  hugepages-2Mi:      0
	  memory:             3035908Ki
	  pods:               110
	Allocatable:
	  cpu:                2
	  ephemeral-storage:  17734596Ki
	  hugepages-2Mi:      0
	  memory:             3035908Ki
	  pods:               110
	System Info:
	  Machine ID:                 5db77235f15f4a52ad7c561433b2bbe5
	  System UUID:                5db77235-f15f-4a52-ad7c-561433b2bbe5
	  Boot ID:                    7c4938cf-e087-4d48-94a0-7660c53890e7
	  Kernel Version:             6.6.95
	  OS Image:                   Buildroot 2025.02
	  Operating System:           linux
	  Architecture:               amd64
	  Container Runtime Version:  docker://28.5.1
	  Kubelet Version:            v1.34.1
	  Kube-Proxy Version:         
	PodCIDR:                      10.244.0.0/24
	PodCIDRs:                     10.244.0.0/24
	Non-terminated Pods:          (11 in total)
	  Namespace                   Name                                          CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
	  ---------                   ----                                          ------------  ----------  ---------------  -------------  ---
	  default                     busybox                                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m7s
	  kube-system                 coredns-66bc5c9577-nj6pk                      100m (5%)     0 (0%)      70Mi (2%)        170Mi (5%)     2m44s
	  kube-system                 etcd-no-preload-019660                        100m (5%)     0 (0%)      100Mi (3%)       0 (0%)         2m50s
	  kube-system                 kube-apiserver-no-preload-019660              250m (12%)    0 (0%)      0 (0%)           0 (0%)         2m50s
	  kube-system                 kube-controller-manager-no-preload-019660     200m (10%)    0 (0%)      0 (0%)           0 (0%)         2m50s
	  kube-system                 kube-proxy-wlb9w                              0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m45s
	  kube-system                 kube-scheduler-no-preload-019660              100m (5%)     0 (0%)      0 (0%)           0 (0%)         2m50s
	  kube-system                 metrics-server-746fcd58dc-tg8q5               100m (5%)     0 (0%)      200Mi (6%)       0 (0%)         117s
	  kube-system                 storage-provisioner                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m42s
	  kubernetes-dashboard        dashboard-metrics-scraper-6ffb444bf9-4965t    0 (0%)        0 (0%)      0 (0%)           0 (0%)         65s
	  kubernetes-dashboard        kubernetes-dashboard-855c9754f9-zh9mv         0 (0%)        0 (0%)      0 (0%)           0 (0%)         65s
	Allocated resources:
	  (Total limits may be over 100 percent, i.e., overcommitted.)
	  Resource           Requests     Limits
	  --------           --------     ------
	  cpu                850m (42%)   0 (0%)
	  memory             370Mi (12%)  170Mi (5%)
	  ephemeral-storage  0 (0%)       0 (0%)
	  hugepages-2Mi      0 (0%)       0 (0%)
	Events:
	  Type     Reason                   Age                    From             Message
	  ----     ------                   ----                   ----             -------
	  Normal   Starting                 2m41s                  kube-proxy       
	  Normal   Starting                 67s                    kube-proxy       
	  Normal   NodeHasSufficientMemory  2m59s (x8 over 2m59s)  kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    2m59s (x8 over 2m59s)  kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     2m59s (x7 over 2m59s)  kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	  Normal   NodeAllocatableEnforced  2m59s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   Starting                 2m50s                  kubelet          Starting kubelet.
	  Normal   NodeAllocatableEnforced  2m50s                  kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasSufficientMemory  2m50s                  kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    2m50s                  kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     2m50s                  kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	  Normal   NodeReady                2m46s                  kubelet          Node no-preload-019660 status is now: NodeReady
	  Normal   RegisteredNode           2m45s                  node-controller  Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
	  Normal   Starting                 77s                    kubelet          Starting kubelet.
	  Normal   NodeAllocatableEnforced  77s                    kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasNoDiskPressure    76s (x8 over 77s)      kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     76s (x7 over 77s)      kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	  Normal   NodeHasSufficientMemory  76s (x8 over 77s)      kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Warning  Rebooted                 71s                    kubelet          Node no-preload-019660 has been rebooted, boot id: 7c4938cf-e087-4d48-94a0-7660c53890e7
	  Normal   RegisteredNode           68s                    node-controller  Node no-preload-019660 event: Registered Node no-preload-019660 in Controller
	  Normal   Starting                 5s                     kubelet          Starting kubelet.
	  Normal   NodeAllocatableEnforced  5s                     kubelet          Updated Node Allocatable limit across pods
	  Normal   NodeHasSufficientMemory  5s                     kubelet          Node no-preload-019660 status is now: NodeHasSufficientMemory
	  Normal   NodeHasNoDiskPressure    5s                     kubelet          Node no-preload-019660 status is now: NodeHasNoDiskPressure
	  Normal   NodeHasSufficientPID     5s                     kubelet          Node no-preload-019660 status is now: NodeHasSufficientPID
	
	
	==> dmesg <==
	[Nov23 08:56] Booted with the nomodeset parameter. Only the system framebuffer will be available
	[  +0.000011] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended configuration space under this bridge
	[  +0.001555] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
	[  +0.004890] (rpcbind)[121]: rpcbind.service: Referenced but unset environment variable evaluates to an empty string: RPCBIND_OPTIONS
	[  +0.922269] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
	[  +0.000017] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
	[  +0.000002] NFSD: Unable to initialize client recovery tracking! (-2)
	[  +0.557715] kauditd_printk_skb: 29 callbacks suppressed
	[  +0.102404] kauditd_printk_skb: 421 callbacks suppressed
	[Nov23 08:57] kauditd_printk_skb: 165 callbacks suppressed
	[  +4.416704] kauditd_printk_skb: 134 callbacks suppressed
	[  +0.028951] kauditd_printk_skb: 144 callbacks suppressed
	[  +1.212600] kauditd_printk_skb: 93 callbacks suppressed
	[  +0.188677] kauditd_printk_skb: 78 callbacks suppressed
	[Nov23 08:58] kauditd_printk_skb: 35 callbacks suppressed
	
	
	==> etcd [78433f5a1dee] <==
	{"level":"info","ts":"2025-11-23T08:55:27.960210Z","caller":"traceutil/trace.go:172","msg":"trace[1913795349] transaction","detail":"{read_only:false; response_revision:359; number_of_response:1; }","duration":"132.125474ms","start":"2025-11-23T08:55:27.828070Z","end":"2025-11-23T08:55:27.960197Z","steps":["trace[1913795349] 'process raft request'  (duration: 130.470237ms)"],"step_count":1}
	{"level":"warn","ts":"2025-11-23T08:55:27.961326Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"115.093447ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/service-cidrs-controller\" limit:1 ","response":"range_response_count:1 size:214"}
	{"level":"info","ts":"2025-11-23T08:55:27.961420Z","caller":"traceutil/trace.go:172","msg":"trace[1979015044] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/service-cidrs-controller; range_end:; response_count:1; response_revision:360; }","duration":"115.232691ms","start":"2025-11-23T08:55:27.846179Z","end":"2025-11-23T08:55:27.961412Z","steps":["trace[1979015044] 'agreement among raft nodes before linearized reading'  (duration: 114.979531ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:27.964671Z","caller":"traceutil/trace.go:172","msg":"trace[1629415560] transaction","detail":"{read_only:false; response_revision:361; number_of_response:1; }","duration":"113.511815ms","start":"2025-11-23T08:55:27.851149Z","end":"2025-11-23T08:55:27.964661Z","steps":["trace[1629415560] 'process raft request'  (duration: 111.933576ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:27.965851Z","caller":"traceutil/trace.go:172","msg":"trace[339398896] transaction","detail":"{read_only:false; response_revision:362; number_of_response:1; }","duration":"103.77975ms","start":"2025-11-23T08:55:27.862061Z","end":"2025-11-23T08:55:27.965841Z","steps":["trace[339398896] 'process raft request'  (duration: 102.247209ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:52.232221Z","caller":"traceutil/trace.go:172","msg":"trace[991594023] transaction","detail":"{read_only:false; response_revision:463; number_of_response:1; }","duration":"138.295615ms","start":"2025-11-23T08:55:52.093898Z","end":"2025-11-23T08:55:52.232193Z","steps":["trace[991594023] 'process raft request'  (duration: 138.148011ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:55:53.110050Z","caller":"traceutil/trace.go:172","msg":"trace[1408655835] transaction","detail":"{read_only:false; response_revision:464; number_of_response:1; }","duration":"111.465311ms","start":"2025-11-23T08:55:52.998570Z","end":"2025-11-23T08:55:53.110036Z","steps":["trace[1408655835] 'process raft request'  (duration: 111.386468ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:56:16.343294Z","caller":"osutil/interrupt_unix.go:65","msg":"received signal; shutting down","signal":"terminated"}
	{"level":"info","ts":"2025-11-23T08:56:16.343638Z","caller":"embed/etcd.go:426","msg":"closing etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
	{"level":"error","ts":"2025-11-23T08:56:16.344971Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-11-23T08:56:23.350843Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"http: Server closed","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*serveCtx).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/serve.go:90"}
	{"level":"error","ts":"2025-11-23T08:56:23.350926Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2381: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-11-23T08:56:23.350948Z","caller":"etcdserver/server.go:1281","msg":"skipped leadership transfer for single voting member cluster","local-member-id":"113a167c41258c81","current-leader-member-id":"113a167c41258c81"}
	{"level":"info","ts":"2025-11-23T08:56:23.351067Z","caller":"etcdserver/server.go:2342","msg":"server has stopped; stopping storage version's monitor"}
	{"level":"info","ts":"2025-11-23T08:56:23.351076Z","caller":"etcdserver/server.go:2319","msg":"server has stopped; stopping cluster version's monitor"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353233Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353335Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 127.0.0.1:2379: use of closed network connection"}
	{"level":"error","ts":"2025-11-23T08:56:23.353344Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 127.0.0.1:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353381Z","caller":"embed/serve.go:245","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
	{"level":"warn","ts":"2025-11-23T08:56:23.353419Z","caller":"embed/serve.go:247","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.50.40:2379: use of closed network connection"}
	{"level":"error","ts":"2025-11-23T08:56:23.353428Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2379: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-11-23T08:56:23.359157Z","caller":"embed/etcd.go:621","msg":"stopping serving peer traffic","address":"192.168.50.40:2380"}
	{"level":"error","ts":"2025-11-23T08:56:23.359253Z","caller":"embed/etcd.go:912","msg":"setting up serving from embedded etcd failed.","error":"accept tcp 192.168.50.40:2380: use of closed network connection","stacktrace":"go.etcd.io/etcd/server/v3/embed.(*Etcd).errHandler\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:912\ngo.etcd.io/etcd/server/v3/embed.(*Etcd).startHandler.func1\n\tgo.etcd.io/etcd/server/v3/embed/etcd.go:906"}
	{"level":"info","ts":"2025-11-23T08:56:23.359488Z","caller":"embed/etcd.go:626","msg":"stopped serving peer traffic","address":"192.168.50.40:2380"}
	{"level":"info","ts":"2025-11-23T08:56:23.359540Z","caller":"embed/etcd.go:428","msg":"closed etcd server","name":"no-preload-019660","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.50.40:2380"],"advertise-client-urls":["https://192.168.50.40:2379"]}
	
	
	==> etcd [8deb34aee6ea] <==
	{"level":"warn","ts":"2025-11-23T08:57:00.099710Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44330","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.113877Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44336","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.136374Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44356","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.145346Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44368","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.154857Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44394","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.171909Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44414","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.185801Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44422","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.191640Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44442","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.202370Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44456","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.212078Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44464","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.224299Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44490","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.239703Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44498","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.248343Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44522","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.259201Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44546","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.280884Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44576","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.303755Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44586","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.322303Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44610","server-name":"","error":"EOF"}
	{"level":"warn","ts":"2025-11-23T08:57:00.379317Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:44628","server-name":"","error":"EOF"}
	{"level":"info","ts":"2025-11-23T08:57:16.914297Z","caller":"traceutil/trace.go:172","msg":"trace[282693566] transaction","detail":"{read_only:false; response_revision:710; number_of_response:1; }","duration":"165.899912ms","start":"2025-11-23T08:57:16.748378Z","end":"2025-11-23T08:57:16.914278Z","steps":["trace[282693566] 'process raft request'  (duration: 165.731904ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:57:17.891916Z","caller":"traceutil/trace.go:172","msg":"trace[845827594] linearizableReadLoop","detail":"{readStateIndex:756; appliedIndex:756; }","duration":"162.635779ms","start":"2025-11-23T08:57:17.729260Z","end":"2025-11-23T08:57:17.891896Z","steps":["trace[845827594] 'read index received'  (duration: 162.630099ms)","trace[845827594] 'applied index is now lower than readState.Index'  (duration: 4.7µs)"],"step_count":2}
	{"level":"warn","ts":"2025-11-23T08:57:17.892195Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"162.847621ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
	{"level":"info","ts":"2025-11-23T08:57:17.892577Z","caller":"traceutil/trace.go:172","msg":"trace[1595377469] transaction","detail":"{read_only:false; response_revision:712; number_of_response:1; }","duration":"262.918033ms","start":"2025-11-23T08:57:17.629632Z","end":"2025-11-23T08:57:17.892550Z","steps":["trace[1595377469] 'process raft request'  (duration: 262.820051ms)"],"step_count":1}
	{"level":"info","ts":"2025-11-23T08:57:17.892238Z","caller":"traceutil/trace.go:172","msg":"trace[1998076635] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:711; }","duration":"162.976ms","start":"2025-11-23T08:57:17.729254Z","end":"2025-11-23T08:57:17.892230Z","steps":["trace[1998076635] 'agreement among raft nodes before linearized reading'  (duration: 162.824778ms)"],"step_count":1}
	{"level":"warn","ts":"2025-11-23T08:57:17.894716Z","caller":"txn/util.go:93","msg":"apply request took too long","took":"130.045976ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
	{"level":"info","ts":"2025-11-23T08:57:17.894762Z","caller":"traceutil/trace.go:172","msg":"trace[1496763416] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:712; }","duration":"130.105624ms","start":"2025-11-23T08:57:17.764650Z","end":"2025-11-23T08:57:17.894756Z","steps":["trace[1496763416] 'agreement among raft nodes before linearized reading'  (duration: 130.023549ms)"],"step_count":1}
	
	
	==> kernel <==
	 08:58:12 up 1 min,  0 users,  load average: 1.58, 0.55, 0.20
	Linux no-preload-019660 6.6.95 #1 SMP PREEMPT_DYNAMIC Wed Nov 19 01:10:03 UTC 2025 x86_64 GNU/Linux
	PRETTY_NAME="Buildroot 2025.02"
	
	
	==> kube-apiserver [51985d9c2b5e] <==
	W1123 08:56:25.707408       1 logging.go:55] [core] [Channel #135 SubChannel #137]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.731493       1 logging.go:55] [core] [Channel #63 SubChannel #65]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.801488       1 logging.go:55] [core] [Channel #199 SubChannel #201]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.835630       1 logging.go:55] [core] [Channel #147 SubChannel #149]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.837271       1 logging.go:55] [core] [Channel #251 SubChannel #253]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.885167       1 logging.go:55] [core] [Channel #47 SubChannel #49]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.919480       1 logging.go:55] [core] [Channel #139 SubChannel #141]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.953337       1 logging.go:55] [core] [Channel #91 SubChannel #93]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:25.992450       1 logging.go:55] [core] [Channel #191 SubChannel #193]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.001050       1 logging.go:55] [core] [Channel #175 SubChannel #177]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.027017       1 logging.go:55] [core] [Channel #115 SubChannel #117]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.043092       1 logging.go:55] [core] [Channel #159 SubChannel #161]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.075821       1 logging.go:55] [core] [Channel #83 SubChannel #85]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.087192       1 logging.go:55] [core] [Channel #67 SubChannel #69]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.108299       1 logging.go:55] [core] [Channel #207 SubChannel #209]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.143125       1 logging.go:55] [core] [Channel #227 SubChannel #229]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.143847       1 logging.go:55] [core] [Channel #27 SubChannel #29]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.168146       1 logging.go:55] [core] [Channel #31 SubChannel #33]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.194296       1 logging.go:55] [core] [Channel #55 SubChannel #57]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.217089       1 logging.go:55] [core] [Channel #143 SubChannel #145]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.284415       1 logging.go:55] [core] [Channel #39 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.304057       1 logging.go:55] [core] [Channel #127 SubChannel #129]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.351096       1 logging.go:55] [core] [Channel #151 SubChannel #153]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.358315       1 logging.go:55] [core] [Channel #107 SubChannel #109]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	W1123 08:56:26.398513       1 logging.go:55] [core] [Channel #179 SubChannel #181]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
	
	
	==> kube-apiserver [6929fc4394d1] <==
	W1123 08:57:02.240589       1 handler_proxy.go:99] no RequestInfo found in the context
	E1123 08:57:02.241169       1 controller.go:102] "Unhandled Error" err=<
		loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
		, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	 > logger="UnhandledError"
	I1123 08:57:02.242304       1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	I1123 08:57:03.447397       1 controller.go:667] quota admission added evaluator for: deployments.apps
	I1123 08:57:03.566737       1 controller.go:667] quota admission added evaluator for: daemonsets.apps
	I1123 08:57:03.633482       1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
	I1123 08:57:03.665173       1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
	I1123 08:57:04.456742       1 controller.go:667] quota admission added evaluator for: endpoints
	I1123 08:57:04.822296       1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
	I1123 08:57:04.922886       1 controller.go:667] quota admission added evaluator for: replicasets.apps
	I1123 08:57:06.855489       1 controller.go:667] quota admission added evaluator for: namespaces
	I1123 08:57:07.352680       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/kubernetes-dashboard" clusterIPs={"IPv4":"10.100.252.132"}
	I1123 08:57:07.386303       1 alloc.go:328] "allocated clusterIPs" service="kubernetes-dashboard/dashboard-metrics-scraper" clusterIPs={"IPv4":"10.100.154.160"}
	W1123 08:58:06.568683       1 handler_proxy.go:99] no RequestInfo found in the context
	E1123 08:58:06.568889       1 controller.go:102] "Unhandled Error" err=<
		loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
		, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
	 > logger="UnhandledError"
	I1123 08:58:06.569001       1 controller.go:109] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	W1123 08:58:06.583847       1 handler_proxy.go:99] no RequestInfo found in the context
	E1123 08:58:06.587393       1 controller.go:113] "Unhandled Error" err="loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: Error, could not get list of group versions for APIService" logger="UnhandledError"
	I1123 08:58:06.587452       1 controller.go:126] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Rate Limited Requeue.
	
	
	==> kube-controller-manager [1a4750ff7e8c] <==
	I1123 08:57:04.478449       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I1123 08:57:04.488570       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	I1123 08:57:04.494373       1 shared_informer.go:356] "Caches are synced" controller="crt configmap"
	I1123 08:57:04.481772       1 shared_informer.go:356] "Caches are synced" controller="PV protection"
	I1123 08:57:04.502443       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
	I1123 08:57:04.502540       1 shared_informer.go:356] "Caches are synced" controller="deployment"
	I1123 08:57:04.506670       1 shared_informer.go:356] "Caches are synced" controller="ReplicaSet"
	I1123 08:57:04.510647       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I1123 08:57:04.566367       1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
	I1123 08:57:04.591835       1 shared_informer.go:349] "Waiting for caches to sync" controller="garbage collector"
	I1123 08:57:04.750206       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I1123 08:57:04.750262       1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
	I1123 08:57:04.750270       1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
	I1123 08:57:04.793332       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	E1123 08:57:07.066560       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.102507       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.134848       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.147364       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.152054       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.176406       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.177162       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9\" failed with pods \"dashboard-metrics-scraper-6ffb444bf9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	E1123 08:57:07.185205       1 replica_set.go:587] "Unhandled Error" err="sync \"kubernetes-dashboard/kubernetes-dashboard-855c9754f9\" failed with pods \"kubernetes-dashboard-855c9754f9-\" is forbidden: error looking up service account kubernetes-dashboard/kubernetes-dashboard: serviceaccount \"kubernetes-dashboard\" not found" logger="UnhandledError"
	I1123 08:57:14.479438       1 node_lifecycle_controller.go:1044] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
	I1123 08:58:06.668391       1 garbagecollector.go:787] "failed to discover some groups" logger="garbage-collector-controller" groups="map[\"metrics.k8s.io/v1beta1\":\"stale GroupVersion discovery: metrics.k8s.io/v1beta1\"]"
	E1123 08:58:06.670861       1 resource_quota_controller.go:446] "Unhandled Error" err="unable to retrieve the complete list of server APIs: metrics.k8s.io/v1beta1: stale GroupVersion discovery: metrics.k8s.io/v1beta1" logger="UnhandledError"
	
	
	==> kube-controller-manager [e0963762dabe] <==
	I1123 08:55:27.305673       1 shared_informer.go:356] "Caches are synced" controller="VAC protection"
	I1123 08:55:27.305856       1 shared_informer.go:356] "Caches are synced" controller="disruption"
	I1123 08:55:27.305946       1 shared_informer.go:356] "Caches are synced" controller="namespace"
	I1123 08:55:27.307430       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-legacy-unknown"
	I1123 08:55:27.307491       1 shared_informer.go:356] "Caches are synced" controller="TTL after finished"
	I1123 08:55:27.307769       1 shared_informer.go:356] "Caches are synced" controller="certificate-csrsigning-kube-apiserver-client"
	I1123 08:55:27.308002       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I1123 08:55:27.311526       1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
	I1123 08:55:27.320061       1 shared_informer.go:356] "Caches are synced" controller="node"
	I1123 08:55:27.320143       1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
	I1123 08:55:27.320176       1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
	I1123 08:55:27.320181       1 shared_informer.go:349] "Waiting for caches to sync" controller="cidrallocator"
	I1123 08:55:27.320186       1 shared_informer.go:356] "Caches are synced" controller="cidrallocator"
	I1123 08:55:27.323691       1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
	I1123 08:55:27.332119       1 shared_informer.go:356] "Caches are synced" controller="taint"
	I1123 08:55:27.332230       1 node_lifecycle_controller.go:1221] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
	I1123 08:55:27.332307       1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="no-preload-019660"
	I1123 08:55:27.332344       1 node_lifecycle_controller.go:1067] "Controller detected that zone is now in new state" logger="node-lifecycle-controller" zone="" newState="Normal"
	I1123 08:55:27.353034       1 shared_informer.go:356] "Caches are synced" controller="validatingadmissionpolicy-status"
	I1123 08:55:27.353188       1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
	I1123 08:55:27.353234       1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
	I1123 08:55:27.353253       1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
	I1123 08:55:27.355630       1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
	I1123 08:55:27.356002       1 shared_informer.go:356] "Caches are synced" controller="resource quota"
	I1123 08:55:27.484870       1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="no-preload-019660" podCIDRs=["10.244.0.0/24"]
	
	
	==> kube-proxy [4aea324009fd] <==
	I1123 08:55:29.781436       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I1123 08:55:29.882143       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I1123 08:55:29.882176       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
	E1123 08:55:29.882244       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I1123 08:55:30.206875       1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
		error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
		Perhaps ip6tables or your kernel needs to be upgraded.
	 >
	I1123 08:55:30.209951       1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
	I1123 08:55:30.210016       1 server_linux.go:132] "Using iptables Proxier"
	I1123 08:55:30.389394       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I1123 08:55:30.398584       1 server.go:527] "Version info" version="v1.34.1"
	I1123 08:55:30.411854       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1123 08:55:30.436371       1 config.go:106] "Starting endpoint slice config controller"
	I1123 08:55:30.436400       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I1123 08:55:30.436421       1 config.go:403] "Starting serviceCIDR config controller"
	I1123 08:55:30.436428       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I1123 08:55:30.441802       1 config.go:200] "Starting service config controller"
	I1123 08:55:30.441827       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I1123 08:55:30.456879       1 config.go:309] "Starting node config controller"
	I1123 08:55:30.457052       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I1123 08:55:30.457180       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I1123 08:55:30.537976       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I1123 08:55:30.542627       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	I1123 08:55:30.553889       1 shared_informer.go:356] "Caches are synced" controller="service config"
	
	
	==> kube-proxy [8c0537e27a6f] <==
	I1123 08:57:04.109885       1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
	I1123 08:57:04.212001       1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
	I1123 08:57:04.212377       1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.50.40"]
	E1123 08:57:04.212492       1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
	I1123 08:57:04.308881       1 server_linux.go:103] "No iptables support for family" ipFamily="IPv6" error=<
		error listing chain "POSTROUTING" in table "nat": exit status 3: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
		Perhaps ip6tables or your kernel needs to be upgraded.
	 >
	I1123 08:57:04.309495       1 server.go:267] "kube-proxy running in single-stack mode" ipFamily="IPv4"
	I1123 08:57:04.309923       1 server_linux.go:132] "Using iptables Proxier"
	I1123 08:57:04.335219       1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
	I1123 08:57:04.338659       1 server.go:527] "Version info" version="v1.34.1"
	I1123 08:57:04.339118       1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1123 08:57:04.356711       1 config.go:200] "Starting service config controller"
	I1123 08:57:04.358780       1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
	I1123 08:57:04.357281       1 config.go:403] "Starting serviceCIDR config controller"
	I1123 08:57:04.360751       1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
	I1123 08:57:04.359340       1 config.go:309] "Starting node config controller"
	I1123 08:57:04.361083       1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
	I1123 08:57:04.361217       1 shared_informer.go:356] "Caches are synced" controller="node config"
	I1123 08:57:04.357261       1 config.go:106] "Starting endpoint slice config controller"
	I1123 08:57:04.361454       1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
	I1123 08:57:04.461112       1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
	I1123 08:57:04.461168       1 shared_informer.go:356] "Caches are synced" controller="service config"
	I1123 08:57:04.466392       1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
	
	
	==> kube-scheduler [266be5a40ca6] <==
	I1123 08:56:59.176913       1 serving.go:386] Generated self-signed cert in-memory
	W1123 08:57:01.157665       1 requestheader_controller.go:204] Unable to get configmap/extension-apiserver-authentication in kube-system.  Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
	W1123 08:57:01.157869       1 authentication.go:397] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
	W1123 08:57:01.157944       1 authentication.go:398] Continuing without authentication configuration. This may treat all requests as anonymous.
	W1123 08:57:01.158050       1 authentication.go:399] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
	I1123 08:57:01.217478       1 server.go:175] "Starting Kubernetes Scheduler" version="v1.34.1"
	I1123 08:57:01.217604       1 server.go:177] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
	I1123 08:57:01.228584       1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I1123 08:57:01.229023       1 shared_informer.go:349] "Waiting for caches to sync" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I1123 08:57:01.231067       1 secure_serving.go:211] Serving securely on 127.0.0.1:10259
	I1123 08:57:01.231467       1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
	I1123 08:57:01.329575       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	
	
	==> kube-scheduler [57bb06d26ab6] <==
	E1123 08:55:19.477132       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
	E1123 08:55:19.476999       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
	E1123 08:55:19.477074       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
	E1123 08:55:19.478217       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
	E1123 08:55:19.478832       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
	E1123 08:55:19.479554       1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
	E1123 08:55:19.480141       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
	E1123 08:55:19.480165       1 reflector.go:205] "Failed to watch" err="failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Namespace"
	E1123 08:55:19.480360       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
	E1123 08:55:19.480372       1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
	E1123 08:55:19.480530       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
	E1123 08:55:19.480623       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
	E1123 08:55:19.481197       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
	E1123 08:55:19.482165       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
	E1123 08:55:20.289908       1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
	E1123 08:55:20.337370       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
	E1123 08:55:20.366302       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
	E1123 08:55:20.425798       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController"
	E1123 08:55:20.483335       1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
	E1123 08:55:20.494282       1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
	I1123 08:55:23.055993       1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
	I1123 08:56:16.316839       1 secure_serving.go:259] Stopped listening on 127.0.0.1:10259
	I1123 08:56:16.317595       1 server.go:263] "[graceful-termination] secure server has stopped listening"
	I1123 08:56:16.317742       1 server.go:265] "[graceful-termination] secure server is exiting"
	E1123 08:56:16.317790       1 run.go:72] "command failed" err="finished without leader elect"
	
	
	==> kubelet <==
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220241    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-k8s-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220309    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-flexvolume-dir\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220345    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-kubeconfig\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220366    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-usr-share-ca-certificates\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220392    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-ca-certs\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220412    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/35ab3b3769cfe633089649c537c4c291-usr-share-ca-certificates\") pod \"kube-apiserver-no-preload-019660\" (UID: \"35ab3b3769cfe633089649c537c4c291\") " pod="kube-system/kube-apiserver-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220431    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-ca-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220451    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d429a3b7e987da373f8ab85d75d6e509-k8s-certs\") pod \"kube-controller-manager-no-preload-019660\" (UID: \"d429a3b7e987da373f8ab85d75d6e509\") " pod="kube-system/kube-controller-manager-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.220473    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/0bd61e39ef27cab83cc049d81d34254c-kubeconfig\") pod \"kube-scheduler-no-preload-019660\" (UID: \"0bd61e39ef27cab83cc049d81d34254c\") " pod="kube-system/kube-scheduler-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.223516    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-certs\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.224048    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/4765da838683051a5b8aa163156bdc40-etcd-data\") pod \"etcd-no-preload-019660\" (UID: \"4765da838683051a5b8aa163156bdc40\") " pod="kube-system/etcd-no-preload-019660"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.281626    4357 apiserver.go:52] "Watching apiserver"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.354823    4357 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428002    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/92a336c6-9d96-4484-8903-9542086c298e-tmp\") pod \"storage-provisioner\" (UID: \"92a336c6-9d96-4484-8903-9542086c298e\") " pod="kube-system/storage-provisioner"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428072    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-xtables-lock\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.428146    4357 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fb442967-1590-4196-a0b8-1ed0320182cd-lib-modules\") pod \"kube-proxy-wlb9w\" (UID: \"fb442967-1590-4196-a0b8-1ed0320182cd\") " pod="kube-system/kube-proxy-wlb9w"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: I1123 08:58:08.612741    4357 scope.go:117] "RemoveContainer" containerID="1f0a2f0aefa9b826288b8b721a751f41c880f8daa0983c581ae8b039871db1a1"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.874748    4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.877286    4357 kuberuntime_image.go:43] "Failed to pull image" err="Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" image="registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878430    4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container dashboard-metrics-scraper start failed in pod dashboard-metrics-scraper-6ffb444bf9-4965t_kubernetes-dashboard(d4a9e601-4647-40d6-a5d8-db1e8e067281): ErrImagePull: Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/" logger="UnhandledError"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.878855    4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dashboard-metrics-scraper\" with ErrImagePull: \"Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed. Suggest the author of registry.k8s.io/echoserver:1.4 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2. More information at https://docs.docker.com/go/deprecated-image-specs/\"" pod="kubernetes-dashboard/dashboard-metrics-scraper-6ffb444bf9-4965t" podUID="d4a9e601-4647-40d6-a5d8-db1e8e067281"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.918928    4357 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.919810    4357 kuberuntime_image.go:43] "Failed to pull image" err="Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" image="fake.domain/registry.k8s.io/echoserver:1.4"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921110    4357 kuberuntime_manager.go:1449] "Unhandled Error" err="container metrics-server start failed in pod metrics-server-746fcd58dc-tg8q5_kube-system(fb0df7df-58f1-4b52-8193-e19d66dd95bf): ErrImagePull: Error response from daemon: Get \"https://fake.domain/v2/\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host" logger="UnhandledError"
	Nov 23 08:58:08 no-preload-019660 kubelet[4357]: E1123 08:58:08.921171    4357 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"metrics-server\" with ErrImagePull: \"Error response from daemon: Get \\\"https://fake.domain/v2/\\\": dial tcp: lookup fake.domain on 192.168.122.1:53: no such host\"" pod="kube-system/metrics-server-746fcd58dc-tg8q5" podUID="fb0df7df-58f1-4b52-8193-e19d66dd95bf"
	
	
	==> kubernetes-dashboard [57ebcdb97431] <==
	2025/11/23 08:57:20 Starting overwatch
	2025/11/23 08:57:20 Using namespace: kubernetes-dashboard
	2025/11/23 08:57:20 Using in-cluster config to connect to apiserver
	2025/11/23 08:57:20 Using secret token for csrf signing
	2025/11/23 08:57:20 Initializing csrf token from kubernetes-dashboard-csrf secret
	2025/11/23 08:57:20 Empty token. Generating and storing in a secret kubernetes-dashboard-csrf
	2025/11/23 08:57:20 Successful initial request to the apiserver, version: v1.34.1
	2025/11/23 08:57:20 Generating JWE encryption key
	2025/11/23 08:57:20 New synchronizer has been registered: kubernetes-dashboard-key-holder-kubernetes-dashboard. Starting
	2025/11/23 08:57:20 Starting secret synchronizer for kubernetes-dashboard-key-holder in namespace kubernetes-dashboard
	2025/11/23 08:57:21 Initializing JWE encryption key from synchronized object
	2025/11/23 08:57:21 Creating in-cluster Sidecar client
	2025/11/23 08:57:21 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	2025/11/23 08:57:21 Serving insecurely on HTTP port: 9090
	2025/11/23 08:58:06 Metric client health check failed: the server is currently unable to handle the request (get services dashboard-metrics-scraper). Retrying in 30 seconds.
	
	
	==> storage-provisioner [1f0a2f0aefa9] <==
	I1123 08:57:03.436717       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	F1123 08:57:33.518183       1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
	
	
	==> storage-provisioner [371de4a46890] <==
	I1123 08:58:09.007550       1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
	I1123 08:58:09.042381       1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
	I1123 08:58:09.044488       1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
	W1123 08:58:09.057366       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	W1123 08:58:12.516323       1 warnings.go:70] v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
	

-- /stdout --
helpers_test.go:262: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-019660 -n no-preload-019660
helpers_test.go:269: (dbg) Run:  kubectl --context no-preload-019660 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:280: non-running pods: metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:282: ======> post-mortem[TestStartStop/group/no-preload/serial/Pause]: describe non-running pods <======
helpers_test.go:285: (dbg) Run:  kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t
helpers_test.go:285: (dbg) Non-zero exit: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1 (93.005208ms)

** stderr ** 
	Error from server (NotFound): pods "metrics-server-746fcd58dc-tg8q5" not found
	Error from server (NotFound): pods "dashboard-metrics-scraper-6ffb444bf9-4965t" not found

** /stderr **
helpers_test.go:287: kubectl --context no-preload-019660 describe pod metrics-server-746fcd58dc-tg8q5 dashboard-metrics-scraper-6ffb444bf9-4965t: exit status 1
--- FAIL: TestStartStop/group/no-preload/serial/Pause (40.50s)

TestISOImage/PersistentMounts//data (0s)

=== RUN   TestISOImage/PersistentMounts//data
=== PAUSE TestISOImage/PersistentMounts//data

=== CONT  TestISOImage/PersistentMounts//data
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /data | grep /data"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /data | grep /data": context deadline exceeded (2.289µs)
iso_test.go:99: failed to verify existence of "/data" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /data | grep /data\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//data (0.00s)

TestISOImage/PersistentMounts//var/lib/docker (0s)

=== RUN   TestISOImage/PersistentMounts//var/lib/docker
=== PAUSE TestISOImage/PersistentMounts//var/lib/docker

=== CONT  TestISOImage/PersistentMounts//var/lib/docker
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/docker | grep /var/lib/docker"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/docker | grep /var/lib/docker": context deadline exceeded (553ns)
iso_test.go:99: failed to verify existence of "/var/lib/docker" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /var/lib/docker | grep /var/lib/docker\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//var/lib/docker (0.00s)

TestISOImage/PersistentMounts//var/lib/cni (0s)

=== RUN   TestISOImage/PersistentMounts//var/lib/cni
=== PAUSE TestISOImage/PersistentMounts//var/lib/cni

=== CONT  TestISOImage/PersistentMounts//var/lib/cni
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/cni | grep /var/lib/cni"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/cni | grep /var/lib/cni": context deadline exceeded (388ns)
iso_test.go:99: failed to verify existence of "/var/lib/cni" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /var/lib/cni | grep /var/lib/cni\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//var/lib/cni (0.00s)

TestISOImage/PersistentMounts//var/lib/kubelet (0s)

=== RUN   TestISOImage/PersistentMounts//var/lib/kubelet
=== PAUSE TestISOImage/PersistentMounts//var/lib/kubelet

=== CONT  TestISOImage/PersistentMounts//var/lib/kubelet
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet": context deadline exceeded (498ns)
iso_test.go:99: failed to verify existence of "/var/lib/kubelet" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /var/lib/kubelet | grep /var/lib/kubelet\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//var/lib/kubelet (0.00s)

TestISOImage/PersistentMounts//var/lib/minikube (0s)

=== RUN   TestISOImage/PersistentMounts//var/lib/minikube
=== PAUSE TestISOImage/PersistentMounts//var/lib/minikube

=== CONT  TestISOImage/PersistentMounts//var/lib/minikube
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/minikube | grep /var/lib/minikube"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/minikube | grep /var/lib/minikube": context deadline exceeded (575ns)
iso_test.go:99: failed to verify existence of "/var/lib/minikube" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /var/lib/minikube | grep /var/lib/minikube\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//var/lib/minikube (0.00s)

TestISOImage/PersistentMounts//var/lib/toolbox (0s)

=== RUN   TestISOImage/PersistentMounts//var/lib/toolbox
=== PAUSE TestISOImage/PersistentMounts//var/lib/toolbox

=== CONT  TestISOImage/PersistentMounts//var/lib/toolbox
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox": context deadline exceeded (324ns)
iso_test.go:99: failed to verify existence of "/var/lib/toolbox" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /var/lib/toolbox | grep /var/lib/toolbox\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//var/lib/toolbox (0.00s)

TestISOImage/PersistentMounts//var/lib/boot2docker (0s)

=== RUN   TestISOImage/PersistentMounts//var/lib/boot2docker
=== PAUSE TestISOImage/PersistentMounts//var/lib/boot2docker

=== CONT  TestISOImage/PersistentMounts//var/lib/boot2docker
iso_test.go:97: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker"
iso_test.go:97: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker": context deadline exceeded (631ns)
iso_test.go:99: failed to verify existence of "/var/lib/boot2docker" mount. args "out/minikube-linux-amd64 -p guest-773058 ssh \"df -t ext4 /var/lib/boot2docker | grep /var/lib/boot2docker\"": context deadline exceeded
--- FAIL: TestISOImage/PersistentMounts//var/lib/boot2docker (0.00s)

TestISOImage/VersionJSON (0s)

=== RUN   TestISOImage/VersionJSON
iso_test.go:106: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "cat /version.json"
iso_test.go:106: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "cat /version.json": context deadline exceeded (2.821µs)
iso_test.go:108: failed to read /version.json. args "out/minikube-linux-amd64 -p guest-773058 ssh \"cat /version.json\"": context deadline exceeded
--- FAIL: TestISOImage/VersionJSON (0.00s)

TestISOImage/eBPFSupport (0s)

=== RUN   TestISOImage/eBPFSupport
iso_test.go:125: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND'"
iso_test.go:125: (dbg) Non-zero exit: out/minikube-linux-amd64 -p guest-773058 ssh "test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND'": context deadline exceeded (1.58µs)
iso_test.go:127: failed to verify existence of "/sys/kernel/btf/vmlinux" file: args "out/minikube-linux-amd64 -p guest-773058 ssh \"test -f /sys/kernel/btf/vmlinux && echo 'OK' || echo 'NOT FOUND'\"": context deadline exceeded
iso_test.go:131: expected file "/sys/kernel/btf/vmlinux" to exist, but it does not. BTF types are required for CO-RE eBPF programs; set CONFIG_DEBUG_INFO_BTF in kernel configuration.
--- FAIL: TestISOImage/eBPFSupport (0.00s)

Test pass (322/366)

Order  Passed test  Duration (s)
3 TestDownloadOnly/v1.28.0/json-events 6.77
4 TestDownloadOnly/v1.28.0/preload-exists 0
8 TestDownloadOnly/v1.28.0/LogsDuration 0.07
9 TestDownloadOnly/v1.28.0/DeleteAll 0.16
10 TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds 0.14
12 TestDownloadOnly/v1.34.1/json-events 3.06
13 TestDownloadOnly/v1.34.1/preload-exists 0
17 TestDownloadOnly/v1.34.1/LogsDuration 0.08
18 TestDownloadOnly/v1.34.1/DeleteAll 0.16
19 TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds 0.14
21 TestBinaryMirror 0.64
22 TestOffline 118.44
25 TestAddons/PreSetup/EnablingAddonOnNonExistingCluster 0.06
26 TestAddons/PreSetup/DisablingAddonOnNonExistingCluster 0.07
27 TestAddons/Setup 212.76
29 TestAddons/serial/Volcano 43.34
31 TestAddons/serial/GCPAuth/Namespaces 0.11
32 TestAddons/serial/GCPAuth/FakeCredentials 9.57
35 TestAddons/parallel/Registry 16.61
36 TestAddons/parallel/RegistryCreds 0.64
37 TestAddons/parallel/Ingress 23
38 TestAddons/parallel/InspektorGadget 11.89
39 TestAddons/parallel/MetricsServer 6.42
41 TestAddons/parallel/CSI 38.99
42 TestAddons/parallel/Headlamp 22.4
43 TestAddons/parallel/CloudSpanner 6.79
44 TestAddons/parallel/LocalPath 57.96
45 TestAddons/parallel/NvidiaDevicePlugin 6.67
46 TestAddons/parallel/Yakd 12.74
48 TestAddons/StoppedEnableDisable 14.32
49 TestCertOptions 60.42
50 TestCertExpiration 316.83
51 TestDockerFlags 59.65
52 TestForceSystemdFlag 69.53
53 TestForceSystemdEnv 51.49
58 TestErrorSpam/setup 43.06
59 TestErrorSpam/start 0.33
60 TestErrorSpam/status 0.69
61 TestErrorSpam/pause 1.29
62 TestErrorSpam/unpause 1.58
63 TestErrorSpam/stop 16.62
66 TestFunctional/serial/CopySyncFile 0
67 TestFunctional/serial/StartWithProxy 83.84
68 TestFunctional/serial/AuditLog 0
69 TestFunctional/serial/SoftStart 54.35
70 TestFunctional/serial/KubeContext 0.04
71 TestFunctional/serial/KubectlGetPods 0.09
74 TestFunctional/serial/CacheCmd/cache/add_remote 2.18
75 TestFunctional/serial/CacheCmd/cache/add_local 1.3
76 TestFunctional/serial/CacheCmd/cache/CacheDelete 0.06
77 TestFunctional/serial/CacheCmd/cache/list 0.06
78 TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node 0.18
79 TestFunctional/serial/CacheCmd/cache/cache_reload 1.04
80 TestFunctional/serial/CacheCmd/cache/delete 0.12
81 TestFunctional/serial/MinikubeKubectlCmd 0.12
82 TestFunctional/serial/MinikubeKubectlCmdDirectly 0.11
83 TestFunctional/serial/ExtraConfig 55.76
84 TestFunctional/serial/ComponentHealth 0.07
85 TestFunctional/serial/LogsCmd 1.06
86 TestFunctional/serial/LogsFileCmd 1.02
87 TestFunctional/serial/InvalidService 3.91
89 TestFunctional/parallel/ConfigCmd 0.4
90 TestFunctional/parallel/DashboardCmd 42.63
91 TestFunctional/parallel/DryRun 0.24
92 TestFunctional/parallel/InternationalLanguage 0.13
93 TestFunctional/parallel/StatusCmd 0.68
97 TestFunctional/parallel/ServiceCmdConnect 9.5
98 TestFunctional/parallel/AddonsCmd 0.16
99 TestFunctional/parallel/PersistentVolumeClaim 54.83
101 TestFunctional/parallel/SSHCmd 0.36
102 TestFunctional/parallel/CpCmd 1.21
103 TestFunctional/parallel/MySQL 39.87
104 TestFunctional/parallel/FileSync 0.19
105 TestFunctional/parallel/CertSync 1.21
109 TestFunctional/parallel/NodeLabels 0.06
111 TestFunctional/parallel/NonActiveRuntimeDisabled 0.19
113 TestFunctional/parallel/License 0.44
114 TestFunctional/parallel/Version/short 0.06
115 TestFunctional/parallel/Version/components 0.65
116 TestFunctional/parallel/DockerEnv/bash 0.85
117 TestFunctional/parallel/UpdateContextCmd/no_changes 0.07
118 TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster 0.11
119 TestFunctional/parallel/UpdateContextCmd/no_clusters 0.07
120 TestFunctional/parallel/ServiceCmd/DeployApp 10.2
130 TestFunctional/parallel/ImageCommands/ImageListShort 0.21
131 TestFunctional/parallel/ImageCommands/ImageListTable 0.19
132 TestFunctional/parallel/ImageCommands/ImageListJson 0.21
133 TestFunctional/parallel/ImageCommands/ImageListYaml 0.25
134 TestFunctional/parallel/ImageCommands/ImageBuild 4.39
135 TestFunctional/parallel/ImageCommands/Setup 1.51
136 TestFunctional/parallel/ImageCommands/ImageLoadDaemon 0.89
137 TestFunctional/parallel/ImageCommands/ImageReloadDaemon 0.71
138 TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon 1.36
139 TestFunctional/parallel/ImageCommands/ImageSaveToFile 0.32
140 TestFunctional/parallel/ImageCommands/ImageRemove 0.35
141 TestFunctional/parallel/ImageCommands/ImageLoadFromFile 0.54
142 TestFunctional/parallel/ImageCommands/ImageSaveDaemon 0.43
143 TestFunctional/parallel/ProfileCmd/profile_not_create 0.32
144 TestFunctional/parallel/ProfileCmd/profile_list 0.31
145 TestFunctional/parallel/ProfileCmd/profile_json_output 0.31
146 TestFunctional/parallel/MountCmd/any-port 14.34
147 TestFunctional/parallel/ServiceCmd/List 0.32
148 TestFunctional/parallel/ServiceCmd/JSONOutput 0.51
149 TestFunctional/parallel/ServiceCmd/HTTPS 0.3
150 TestFunctional/parallel/ServiceCmd/Format 0.27
151 TestFunctional/parallel/ServiceCmd/URL 0.24
152 TestFunctional/parallel/MountCmd/specific-port 1.66
153 TestFunctional/parallel/MountCmd/VerifyCleanup 1.2
154 TestFunctional/delete_echo-server_images 0.04
155 TestFunctional/delete_my-image_image 0.02
156 TestFunctional/delete_minikube_cached_images 0.02
158 TestGvisorAddon 209.47
161 TestMultiControlPlane/serial/StartCluster 229.33
162 TestMultiControlPlane/serial/DeployApp 7.05
163 TestMultiControlPlane/serial/PingHostFromPods 1.51
164 TestMultiControlPlane/serial/AddWorkerNode 51.02
165 TestMultiControlPlane/serial/NodeLabels 0.07
166 TestMultiControlPlane/serial/HAppyAfterClusterStart 0.69
167 TestMultiControlPlane/serial/CopyFile 10.84
168 TestMultiControlPlane/serial/StopSecondaryNode 14.15
169 TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop 0.52
170 TestMultiControlPlane/serial/RestartSecondaryNode 35.57
171 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart 0.94
172 TestMultiControlPlane/serial/RestartClusterKeepsNodes 176.34
173 TestMultiControlPlane/serial/DeleteSecondaryNode 7.26
174 TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete 0.51
175 TestMultiControlPlane/serial/StopCluster 40.46
176 TestMultiControlPlane/serial/RestartCluster 127.98
177 TestMultiControlPlane/serial/DegradedAfterClusterRestart 0.53
178 TestMultiControlPlane/serial/AddSecondaryNode 92.34
179 TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd 0.71
182 TestImageBuild/serial/Setup 44.41
183 TestImageBuild/serial/NormalBuild 1.53
184 TestImageBuild/serial/BuildWithBuildArg 1.06
185 TestImageBuild/serial/BuildWithDockerIgnore 0.71
186 TestImageBuild/serial/BuildWithSpecifiedDockerfile 1.14
191 TestJSONOutput/start/Command 89.21
192 TestJSONOutput/start/Audit 0
194 TestJSONOutput/start/parallel/DistinctCurrentSteps 0
195 TestJSONOutput/start/parallel/IncreasingCurrentSteps 0
197 TestJSONOutput/pause/Command 0.59
198 TestJSONOutput/pause/Audit 0
200 TestJSONOutput/pause/parallel/DistinctCurrentSteps 0
201 TestJSONOutput/pause/parallel/IncreasingCurrentSteps 0
203 TestJSONOutput/unpause/Command 0.59
204 TestJSONOutput/unpause/Audit 0
206 TestJSONOutput/unpause/parallel/DistinctCurrentSteps 0
207 TestJSONOutput/unpause/parallel/IncreasingCurrentSteps 0
209 TestJSONOutput/stop/Command 14.02
210 TestJSONOutput/stop/Audit 0
212 TestJSONOutput/stop/parallel/DistinctCurrentSteps 0
213 TestJSONOutput/stop/parallel/IncreasingCurrentSteps 0
214 TestErrorJSONOutput 0.23
219 TestMainNoArgs 0.06
220 TestMinikubeProfile 90.31
223 TestMountStart/serial/StartWithMountFirst 21.63
224 TestMountStart/serial/VerifyMountFirst 0.31
225 TestMountStart/serial/StartWithMountSecond 23.78
226 TestMountStart/serial/VerifyMountSecond 0.3
227 TestMountStart/serial/DeleteFirst 0.69
228 TestMountStart/serial/VerifyMountPostDelete 0.3
229 TestMountStart/serial/Stop 1.33
230 TestMountStart/serial/RestartStopped 19.58
231 TestMountStart/serial/VerifyMountPostStop 0.3
234 TestMultiNode/serial/FreshStart2Nodes 121.3
235 TestMultiNode/serial/DeployApp2Nodes 5.39
236 TestMultiNode/serial/PingHostFrom2Pods 0.9
237 TestMultiNode/serial/AddNode 48.92
238 TestMultiNode/serial/MultiNodeLabels 0.06
239 TestMultiNode/serial/ProfileList 0.47
240 TestMultiNode/serial/CopyFile 6.06
241 TestMultiNode/serial/StopNode 2.48
242 TestMultiNode/serial/StartAfterStop 41.41
243 TestMultiNode/serial/RestartKeepsNodes 179.86
244 TestMultiNode/serial/DeleteNode 2.14
245 TestMultiNode/serial/StopMultiNode 26.43
246 TestMultiNode/serial/RestartMultiNode 109.71
247 TestMultiNode/serial/ValidateNameConflict 46.24
252 TestPreload 160.23
254 TestScheduledStopUnix 116.15
255 TestSkaffold 130.94
258 TestRunningBinaryUpgrade 175.85
260 TestKubernetesUpgrade 220
273 TestISOImage/Setup 62.62
275 TestISOImage/Binaries/crictl 0.22
276 TestISOImage/Binaries/curl 0.19
277 TestISOImage/Binaries/docker 0.19
278 TestISOImage/Binaries/git 0.2
279 TestISOImage/Binaries/iptables 0.21
280 TestISOImage/Binaries/podman 0.2
281 TestISOImage/Binaries/rsync 0.2
282 TestISOImage/Binaries/socat 0.2
283 TestISOImage/Binaries/wget 0.19
284 TestISOImage/Binaries/VBoxControl 0.21
285 TestISOImage/Binaries/VBoxService 0.2
286 TestStoppedBinaryUpgrade/Setup 0.46
287 TestStoppedBinaryUpgrade/Upgrade 131.83
288 TestStoppedBinaryUpgrade/MinikubeLogs 0.99
297 TestPause/serial/Start 95.83
299 TestNoKubernetes/serial/StartNoK8sWithVersion 0.1
300 TestNoKubernetes/serial/StartWithK8s 68.12
301 TestNetworkPlugins/group/auto/Start 80.22
302 TestNoKubernetes/serial/StartWithStopK8s 15.89
303 TestPause/serial/SecondStartNoReconfiguration 62.54
304 TestNoKubernetes/serial/Start 25.32
305 TestNetworkPlugins/group/auto/KubeletFlags 0.17
306 TestNetworkPlugins/group/auto/NetCatPod 11.25
307 TestNoKubernetes/serial/VerifyNok8sNoK8sDownloads 0
308 TestNoKubernetes/serial/VerifyK8sNotRunning 0.18
309 TestNoKubernetes/serial/ProfileList 34.47
310 TestNetworkPlugins/group/auto/DNS 0.18
311 TestNetworkPlugins/group/auto/Localhost 0.14
312 TestNetworkPlugins/group/auto/HairPin 0.13
313 TestNetworkPlugins/group/kindnet/Start 75.59
314 TestPause/serial/Pause 0.7
315 TestPause/serial/VerifyStatus 0.29
316 TestPause/serial/Unpause 0.73
317 TestNoKubernetes/serial/Stop 1.47
318 TestPause/serial/PauseAgain 0.87
319 TestPause/serial/DeletePaused 1.04
320 TestNoKubernetes/serial/StartNoArgs 28.1
321 TestPause/serial/VerifyDeletedResources 0.48
322 TestNetworkPlugins/group/calico/Start 114.29
323 TestNetworkPlugins/group/custom-flannel/Start 99.07
324 TestNoKubernetes/serial/VerifyK8sNotRunningSecond 0.18
325 TestNetworkPlugins/group/false/Start 132.12
326 TestNetworkPlugins/group/kindnet/ControllerPod 6.01
327 TestNetworkPlugins/group/kindnet/KubeletFlags 0.21
328 TestNetworkPlugins/group/kindnet/NetCatPod 12.26
329 TestNetworkPlugins/group/kindnet/DNS 0.17
330 TestNetworkPlugins/group/kindnet/Localhost 0.2
331 TestNetworkPlugins/group/kindnet/HairPin 0.19
332 TestNetworkPlugins/group/enable-default-cni/Start 99.73
333 TestNetworkPlugins/group/calico/ControllerPod 6.01
334 TestNetworkPlugins/group/custom-flannel/KubeletFlags 0.2
335 TestNetworkPlugins/group/custom-flannel/NetCatPod 12.27
336 TestNetworkPlugins/group/calico/KubeletFlags 0.22
337 TestNetworkPlugins/group/calico/NetCatPod 20.33
338 TestNetworkPlugins/group/custom-flannel/DNS 0.25
339 TestNetworkPlugins/group/custom-flannel/Localhost 0.19
340 TestNetworkPlugins/group/custom-flannel/HairPin 0.2
341 TestNetworkPlugins/group/calico/DNS 0.23
342 TestNetworkPlugins/group/calico/Localhost 0.18
343 TestNetworkPlugins/group/calico/HairPin 0.18
344 TestNetworkPlugins/group/flannel/Start 69.54
345 TestNetworkPlugins/group/bridge/Start 102.33
346 TestNetworkPlugins/group/false/KubeletFlags 0.21
347 TestNetworkPlugins/group/false/NetCatPod 12.25
348 TestNetworkPlugins/group/false/DNS 0.23
349 TestNetworkPlugins/group/false/Localhost 0.17
350 TestNetworkPlugins/group/false/HairPin 0.18
351 TestNetworkPlugins/group/kubenet/Start 95.32
352 TestNetworkPlugins/group/enable-default-cni/KubeletFlags 0.21
353 TestNetworkPlugins/group/enable-default-cni/NetCatPod 14.26
354 TestNetworkPlugins/group/enable-default-cni/DNS 0.27
355 TestNetworkPlugins/group/enable-default-cni/Localhost 0.15
356 TestNetworkPlugins/group/enable-default-cni/HairPin 0.15
357 TestNetworkPlugins/group/flannel/ControllerPod 6.01
358 TestNetworkPlugins/group/flannel/KubeletFlags 0.23
359 TestNetworkPlugins/group/flannel/NetCatPod 13.56
361 TestStartStop/group/old-k8s-version/serial/FirstStart 102.02
362 TestNetworkPlugins/group/flannel/DNS 0.19
363 TestNetworkPlugins/group/flannel/Localhost 0.2
364 TestNetworkPlugins/group/flannel/HairPin 0.16
366 TestStartStop/group/no-preload/serial/FirstStart 107.71
367 TestNetworkPlugins/group/bridge/KubeletFlags 0.2
368 TestNetworkPlugins/group/bridge/NetCatPod 12.31
369 TestNetworkPlugins/group/bridge/DNS 0.19
370 TestNetworkPlugins/group/bridge/Localhost 0.16
371 TestNetworkPlugins/group/bridge/HairPin 0.16
372 TestNetworkPlugins/group/kubenet/KubeletFlags 0.22
373 TestNetworkPlugins/group/kubenet/NetCatPod 11.31
375 TestStartStop/group/embed-certs/serial/FirstStart 97
376 TestNetworkPlugins/group/kubenet/DNS 0.18
377 TestNetworkPlugins/group/kubenet/Localhost 0.14
378 TestNetworkPlugins/group/kubenet/HairPin 0.18
380 TestStartStop/group/default-k8s-diff-port/serial/FirstStart 93.56
381 TestStartStop/group/old-k8s-version/serial/DeployApp 10.41
382 TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive 1.31
383 TestStartStop/group/old-k8s-version/serial/Stop 14.33
384 TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop 0.16
385 TestStartStop/group/old-k8s-version/serial/SecondStart 45.54
386 TestStartStop/group/no-preload/serial/DeployApp 9.38
387 TestStartStop/group/no-preload/serial/EnableAddonWhileActive 1.18
388 TestStartStop/group/no-preload/serial/Stop 13.58
389 TestStartStop/group/no-preload/serial/EnableAddonAfterStop 0.17
390 TestStartStop/group/no-preload/serial/SecondStart 52.39
391 TestStartStop/group/embed-certs/serial/DeployApp 11.34
392 TestStartStop/group/embed-certs/serial/EnableAddonWhileActive 1.16
393 TestStartStop/group/embed-certs/serial/Stop 13.94
394 TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop 14.01
395 TestStartStop/group/default-k8s-diff-port/serial/DeployApp 11.35
396 TestStartStop/group/embed-certs/serial/EnableAddonAfterStop 0.17
397 TestStartStop/group/embed-certs/serial/SecondStart 52.64
398 TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop 5.09
399 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive 1.29
400 TestStartStop/group/default-k8s-diff-port/serial/Stop 13.7
401 TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages 0.24
402 TestStartStop/group/old-k8s-version/serial/Pause 2.99
404 TestStartStop/group/newest-cni/serial/FirstStart 67.45
405 TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop 0.17
406 TestStartStop/group/default-k8s-diff-port/serial/SecondStart 75.66
407 TestStartStop/group/no-preload/serial/UserAppExistsAfterStop 6.01
408 TestStartStop/group/no-preload/serial/AddonExistsAfterStop 5.1
409 TestStartStop/group/no-preload/serial/VerifyKubernetesImages 0.21
411 TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop 9.01
412 TestStartStop/group/embed-certs/serial/AddonExistsAfterStop 5.09
413 TestStartStop/group/embed-certs/serial/VerifyKubernetesImages 0.28
414 TestStartStop/group/embed-certs/serial/Pause 3.36
425 TestStartStop/group/newest-cni/serial/DeployApp 0
426 TestStartStop/group/newest-cni/serial/EnableAddonWhileActive 1
427 TestStartStop/group/newest-cni/serial/Stop 13.58
428 TestStartStop/group/newest-cni/serial/EnableAddonAfterStop 0.16
429 TestStartStop/group/newest-cni/serial/SecondStart 41.7
430 TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop 11
431 TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop 5.08
432 TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages 0.2
433 TestStartStop/group/default-k8s-diff-port/serial/Pause 2.63
434 TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop 0
435 TestStartStop/group/newest-cni/serial/AddonExistsAfterStop 0
436 TestStartStop/group/newest-cni/serial/VerifyKubernetesImages 0.24
437 TestStartStop/group/newest-cni/serial/Pause 2.65

TestDownloadOnly/v1.28.0/json-events (6.77s)

=== RUN   TestDownloadOnly/v1.28.0/json-events
aaa_download_only_test.go:80: (dbg) Run:  out/minikube-linux-amd64 start -o=json --download-only -p download-only-656294 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=kvm2 
aaa_download_only_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -o=json --download-only -p download-only-656294 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=kvm2 : (6.765919741s)
--- PASS: TestDownloadOnly/v1.28.0/json-events (6.77s)

TestDownloadOnly/v1.28.0/preload-exists (0s)

=== RUN   TestDownloadOnly/v1.28.0/preload-exists
I1123 07:55:29.927064   22148 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime docker
I1123 07:55:29.927149   22148 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4
--- PASS: TestDownloadOnly/v1.28.0/preload-exists (0.00s)

TestDownloadOnly/v1.28.0/LogsDuration (0.07s)

=== RUN   TestDownloadOnly/v1.28.0/LogsDuration
aaa_download_only_test.go:183: (dbg) Run:  out/minikube-linux-amd64 logs -p download-only-656294
aaa_download_only_test.go:183: (dbg) Non-zero exit: out/minikube-linux-amd64 logs -p download-only-656294: exit status 85 (72.093043ms)

-- stdout --
	
	==> Audit <==
	┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────┬─────────┬─────────┬─────────────────────┬──────────┐
	│ COMMAND │                                                                      ARGS                                                                       │       PROFILE        │  USER   │ VERSION │     START TIME      │ END TIME │
	├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────┼─────────┼─────────┼─────────────────────┼──────────┤
	│ start   │ -o=json --download-only -p download-only-656294 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=kvm2 │ download-only-656294 │ jenkins │ v1.37.0 │ 23 Nov 25 07:55 UTC │          │
	└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────┴─────────┴─────────┴─────────────────────┴──────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/11/23 07:55:23
	Running on machine: ubuntu-20-agent-3
	Binary: Built with gc go1.25.3 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1123 07:55:23.211280   22160 out.go:360] Setting OutFile to fd 1 ...
	I1123 07:55:23.211379   22160 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 07:55:23.211387   22160 out.go:374] Setting ErrFile to fd 2...
	I1123 07:55:23.211391   22160 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 07:55:23.211562   22160 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	W1123 07:55:23.211675   22160 root.go:314] Error reading config file at /home/jenkins/minikube-integration/21966-18241/.minikube/config/config.json: open /home/jenkins/minikube-integration/21966-18241/.minikube/config/config.json: no such file or directory
	I1123 07:55:23.212144   22160 out.go:368] Setting JSON to true
	I1123 07:55:23.213461   22160 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":2272,"bootTime":1763882251,"procs":202,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I1123 07:55:23.213518   22160 start.go:143] virtualization: kvm guest
	I1123 07:55:23.217260   22160 out.go:99] [download-only-656294] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	W1123 07:55:23.217409   22160 preload.go:354] Failed to list preload files: open /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball: no such file or directory
	I1123 07:55:23.217462   22160 notify.go:221] Checking for updates...
	I1123 07:55:23.218812   22160 out.go:171] MINIKUBE_LOCATION=21966
	I1123 07:55:23.220404   22160 out.go:171] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1123 07:55:23.221847   22160 out.go:171] KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 07:55:23.223119   22160 out.go:171] MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 07:55:23.224447   22160 out.go:171] MINIKUBE_BIN=out/minikube-linux-amd64
	W1123 07:55:23.226646   22160 out.go:336] minikube skips various validations when --force is supplied; this may lead to unexpected behavior
	I1123 07:55:23.226884   22160 driver.go:422] Setting default libvirt URI to qemu:///system
	I1123 07:55:23.760146   22160 out.go:99] Using the kvm2 driver based on user configuration
	I1123 07:55:23.760202   22160 start.go:309] selected driver: kvm2
	I1123 07:55:23.760212   22160 start.go:927] validating driver "kvm2" against <nil>
	I1123 07:55:23.760602   22160 start_flags.go:327] no existing cluster config was found, will generate one from the flags 
	I1123 07:55:23.761127   22160 start_flags.go:410] Using suggested 6144MB memory alloc based on sys=32093MB, container=0MB
	I1123 07:55:23.761323   22160 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true]
	I1123 07:55:23.761351   22160 cni.go:84] Creating CNI manager for ""
	I1123 07:55:23.761402   22160 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
	I1123 07:55:23.761414   22160 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni
	I1123 07:55:23.761459   22160 start.go:353] cluster config:
	{Name:download-only-656294 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:6144 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:download-only-656294 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 07:55:23.761630   22160 iso.go:125] acquiring lock: {Name:mk9cdb644d601a15f26caa6d527f7a63e06eb691 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
	I1123 07:55:23.763404   22160 out.go:99] Downloading VM boot image ...
	I1123 07:55:23.763439   22160 download.go:108] Downloading: https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso?checksum=file:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso.sha256 -> /home/jenkins/minikube-integration/21966-18241/.minikube/cache/iso/amd64/minikube-v1.37.0-1763503576-21924-amd64.iso
	I1123 07:55:26.488371   22160 out.go:99] Starting "download-only-656294" primary control-plane node in "download-only-656294" cluster
	I1123 07:55:26.488442   22160 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime docker
	I1123 07:55:26.503283   22160 preload.go:148] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.0/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4
	I1123 07:55:26.503317   22160 cache.go:65] Caching tarball of preloaded images
	I1123 07:55:26.503485   22160 preload.go:188] Checking if preload exists for k8s version v1.28.0 and runtime docker
	I1123 07:55:26.505045   22160 out.go:99] Downloading Kubernetes v1.28.0 preload ...
	I1123 07:55:26.505065   22160 preload.go:318] getting checksum for preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4 from gcs api...
	I1123 07:55:26.523848   22160 preload.go:295] Got checksum from GCS API "8a955be835827bc584bcce0658a7fcc9"
	I1123 07:55:26.523984   22160 download.go:108] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.0/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4?checksum=md5:8a955be835827bc584bcce0658a7fcc9 -> /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4
	
	
	* The control-plane node download-only-656294 host does not exist
	  To start a cluster, run: "minikube start -p download-only-656294"

-- /stdout --
aaa_download_only_test.go:184: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.28.0/LogsDuration (0.07s)

TestDownloadOnly/v1.28.0/DeleteAll (0.16s)

=== RUN   TestDownloadOnly/v1.28.0/DeleteAll
aaa_download_only_test.go:196: (dbg) Run:  out/minikube-linux-amd64 delete --all
--- PASS: TestDownloadOnly/v1.28.0/DeleteAll (0.16s)

TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds (0.14s)

=== RUN   TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds
aaa_download_only_test.go:207: (dbg) Run:  out/minikube-linux-amd64 delete -p download-only-656294
--- PASS: TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds (0.14s)

TestDownloadOnly/v1.34.1/json-events (3.06s)

=== RUN   TestDownloadOnly/v1.34.1/json-events
aaa_download_only_test.go:80: (dbg) Run:  out/minikube-linux-amd64 start -o=json --download-only -p download-only-220208 --force --alsologtostderr --kubernetes-version=v1.34.1 --container-runtime=docker --driver=kvm2 
aaa_download_only_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -o=json --download-only -p download-only-220208 --force --alsologtostderr --kubernetes-version=v1.34.1 --container-runtime=docker --driver=kvm2 : (3.0580598s)
--- PASS: TestDownloadOnly/v1.34.1/json-events (3.06s)

TestDownloadOnly/v1.34.1/preload-exists (0s)

=== RUN   TestDownloadOnly/v1.34.1/preload-exists
I1123 07:55:33.363655   22148 preload.go:188] Checking if preload exists for k8s version v1.34.1 and runtime docker
I1123 07:55:33.363690   22148 preload.go:203] Found local preload: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4
--- PASS: TestDownloadOnly/v1.34.1/preload-exists (0.00s)

TestDownloadOnly/v1.34.1/LogsDuration (0.08s)

=== RUN   TestDownloadOnly/v1.34.1/LogsDuration
aaa_download_only_test.go:183: (dbg) Run:  out/minikube-linux-amd64 logs -p download-only-220208
aaa_download_only_test.go:183: (dbg) Non-zero exit: out/minikube-linux-amd64 logs -p download-only-220208: exit status 85 (74.82303ms)

-- stdout --
	
	==> Audit <==
	┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
	│ COMMAND │                                                                      ARGS                                                                       │       PROFILE        │  USER   │ VERSION │     START TIME      │      END TIME       │
	├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
	│ start   │ -o=json --download-only -p download-only-656294 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=kvm2 │ download-only-656294 │ jenkins │ v1.37.0 │ 23 Nov 25 07:55 UTC │                     │
	│ delete  │ --all                                                                                                                                           │ minikube             │ jenkins │ v1.37.0 │ 23 Nov 25 07:55 UTC │ 23 Nov 25 07:55 UTC │
	│ delete  │ -p download-only-656294                                                                                                                         │ download-only-656294 │ jenkins │ v1.37.0 │ 23 Nov 25 07:55 UTC │ 23 Nov 25 07:55 UTC │
	│ start   │ -o=json --download-only -p download-only-220208 --force --alsologtostderr --kubernetes-version=v1.34.1 --container-runtime=docker --driver=kvm2 │ download-only-220208 │ jenkins │ v1.37.0 │ 23 Nov 25 07:55 UTC │                     │
	└─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
	
	
	==> Last Start <==
	Log file created at: 2025/11/23 07:55:30
	Running on machine: ubuntu-20-agent-3
	Binary: Built with gc go1.25.3 for linux/amd64
	Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
	I1123 07:55:30.357608   22354 out.go:360] Setting OutFile to fd 1 ...
	I1123 07:55:30.357834   22354 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 07:55:30.357844   22354 out.go:374] Setting ErrFile to fd 2...
	I1123 07:55:30.357850   22354 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 07:55:30.358039   22354 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 07:55:30.358522   22354 out.go:368] Setting JSON to true
	I1123 07:55:30.359334   22354 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":2279,"bootTime":1763882251,"procs":172,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I1123 07:55:30.359392   22354 start.go:143] virtualization: kvm guest
	I1123 07:55:30.361721   22354 out.go:99] [download-only-220208] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	I1123 07:55:30.361864   22354 notify.go:221] Checking for updates...
	I1123 07:55:30.363492   22354 out.go:171] MINIKUBE_LOCATION=21966
	I1123 07:55:30.364768   22354 out.go:171] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1123 07:55:30.365999   22354 out.go:171] KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 07:55:30.367372   22354 out.go:171] MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 07:55:30.368841   22354 out.go:171] MINIKUBE_BIN=out/minikube-linux-amd64
	
	
	* The control-plane node download-only-220208 host does not exist
	  To start a cluster, run: "minikube start -p download-only-220208"

-- /stdout --
aaa_download_only_test.go:184: minikube logs failed with error: exit status 85
--- PASS: TestDownloadOnly/v1.34.1/LogsDuration (0.08s)

TestDownloadOnly/v1.34.1/DeleteAll (0.16s)

=== RUN   TestDownloadOnly/v1.34.1/DeleteAll
aaa_download_only_test.go:196: (dbg) Run:  out/minikube-linux-amd64 delete --all
--- PASS: TestDownloadOnly/v1.34.1/DeleteAll (0.16s)

TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds (0.14s)

=== RUN   TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds
aaa_download_only_test.go:207: (dbg) Run:  out/minikube-linux-amd64 delete -p download-only-220208
--- PASS: TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds (0.14s)

TestBinaryMirror (0.64s)

=== RUN   TestBinaryMirror
I1123 07:55:34.025804   22148 binary.go:80] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256
aaa_download_only_test.go:309: (dbg) Run:  out/minikube-linux-amd64 start --download-only -p binary-mirror-217391 --alsologtostderr --binary-mirror http://127.0.0.1:38295 --driver=kvm2 
helpers_test.go:175: Cleaning up "binary-mirror-217391" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p binary-mirror-217391
--- PASS: TestBinaryMirror (0.64s)

TestOffline (118.44s)

=== RUN   TestOffline
=== PAUSE TestOffline

=== CONT  TestOffline
aab_offline_test.go:55: (dbg) Run:  out/minikube-linux-amd64 start -p offline-docker-008501 --alsologtostderr -v=1 --memory=3072 --wait=true --driver=kvm2 
aab_offline_test.go:55: (dbg) Done: out/minikube-linux-amd64 start -p offline-docker-008501 --alsologtostderr -v=1 --memory=3072 --wait=true --driver=kvm2 : (1m57.543527813s)
helpers_test.go:175: Cleaning up "offline-docker-008501" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p offline-docker-008501
--- PASS: TestOffline (118.44s)

TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.06s)

=== RUN   TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/EnablingAddonOnNonExistingCluster

=== CONT  TestAddons/PreSetup/EnablingAddonOnNonExistingCluster
addons_test.go:1000: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p addons-085189
addons_test.go:1000: (dbg) Non-zero exit: out/minikube-linux-amd64 addons enable dashboard -p addons-085189: exit status 85 (61.694369ms)

-- stdout --
	* Profile "addons-085189" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-085189"

-- /stdout --
--- PASS: TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.06s)

TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.07s)

=== RUN   TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
=== PAUSE TestAddons/PreSetup/DisablingAddonOnNonExistingCluster

=== CONT  TestAddons/PreSetup/DisablingAddonOnNonExistingCluster
addons_test.go:1011: (dbg) Run:  out/minikube-linux-amd64 addons disable dashboard -p addons-085189
addons_test.go:1011: (dbg) Non-zero exit: out/minikube-linux-amd64 addons disable dashboard -p addons-085189: exit status 85 (66.695324ms)

-- stdout --
	* Profile "addons-085189" not found. Run "minikube profile list" to view all profiles.
	  To start a cluster, run: "minikube start -p addons-085189"

-- /stdout --
--- PASS: TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.07s)

TestAddons/Setup (212.76s)

=== RUN   TestAddons/Setup
addons_test.go:108: (dbg) Run:  out/minikube-linux-amd64 start -p addons-085189 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=kvm2  --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher
addons_test.go:108: (dbg) Done: out/minikube-linux-amd64 start -p addons-085189 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=kvm2  --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher: (3m32.761789177s)
--- PASS: TestAddons/Setup (212.76s)

TestAddons/serial/Volcano (43.34s)

=== RUN   TestAddons/serial/Volcano
addons_test.go:884: volcano-controller stabilized in 26.944268ms
addons_test.go:868: volcano-scheduler stabilized in 29.986595ms
addons_test.go:876: volcano-admission stabilized in 33.474131ms
addons_test.go:890: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-scheduler" in namespace "volcano-system" ...
helpers_test.go:352: "volcano-scheduler-76c996c8bf-g45lg" [f6eff4b1-00cb-4075-8391-8357e6609b23] Running
addons_test.go:890: (dbg) TestAddons/serial/Volcano: app=volcano-scheduler healthy within 5.004671455s
addons_test.go:894: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-admission" in namespace "volcano-system" ...
helpers_test.go:352: "volcano-admission-6c447bd768-w267x" [562b2e64-1fee-4e0e-be87-998c8565f70c] Running
addons_test.go:894: (dbg) TestAddons/serial/Volcano: app=volcano-admission healthy within 5.006456346s
addons_test.go:898: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-controller" in namespace "volcano-system" ...
helpers_test.go:352: "volcano-controllers-6fd4f85cb8-7r48g" [a1869c71-0796-4cc8-93a2-13f5e27807fb] Running
addons_test.go:898: (dbg) TestAddons/serial/Volcano: app=volcano-controller healthy within 5.004246543s
addons_test.go:903: (dbg) Run:  kubectl --context addons-085189 delete -n volcano-system job volcano-admission-init
addons_test.go:909: (dbg) Run:  kubectl --context addons-085189 create -f testdata/vcjob.yaml
addons_test.go:917: (dbg) Run:  kubectl --context addons-085189 get vcjob -n my-volcano
addons_test.go:935: (dbg) TestAddons/serial/Volcano: waiting 3m0s for pods matching "volcano.sh/job-name=test-job" in namespace "my-volcano" ...
helpers_test.go:352: "test-job-nginx-0" [75187808-0a55-4765-86a8-65a4a742d278] Pending
helpers_test.go:352: "test-job-nginx-0" [75187808-0a55-4765-86a8-65a4a742d278] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:352: "test-job-nginx-0" [75187808-0a55-4765-86a8-65a4a742d278] Running
addons_test.go:935: (dbg) TestAddons/serial/Volcano: volcano.sh/job-name=test-job healthy within 16.005031734s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable volcano --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable volcano --alsologtostderr -v=1: (11.881296653s)
--- PASS: TestAddons/serial/Volcano (43.34s)

TestAddons/serial/GCPAuth/Namespaces (0.11s)

=== RUN   TestAddons/serial/GCPAuth/Namespaces
addons_test.go:630: (dbg) Run:  kubectl --context addons-085189 create ns new-namespace
addons_test.go:644: (dbg) Run:  kubectl --context addons-085189 get secret gcp-auth -n new-namespace
--- PASS: TestAddons/serial/GCPAuth/Namespaces (0.11s)

TestAddons/serial/GCPAuth/FakeCredentials (9.57s)

=== RUN   TestAddons/serial/GCPAuth/FakeCredentials
addons_test.go:675: (dbg) Run:  kubectl --context addons-085189 create -f testdata/busybox.yaml
addons_test.go:682: (dbg) Run:  kubectl --context addons-085189 create sa gcp-auth-test
addons_test.go:688: (dbg) TestAddons/serial/GCPAuth/FakeCredentials: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [bdf26827-b241-4a5b-ad98-1f5ca95149dd] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [bdf26827-b241-4a5b-ad98-1f5ca95149dd] Running
addons_test.go:688: (dbg) TestAddons/serial/GCPAuth/FakeCredentials: integration-test=busybox healthy within 9.004505013s
addons_test.go:694: (dbg) Run:  kubectl --context addons-085189 exec busybox -- /bin/sh -c "printenv GOOGLE_APPLICATION_CREDENTIALS"
addons_test.go:706: (dbg) Run:  kubectl --context addons-085189 describe sa gcp-auth-test
addons_test.go:744: (dbg) Run:  kubectl --context addons-085189 exec busybox -- /bin/sh -c "printenv GOOGLE_CLOUD_PROJECT"
--- PASS: TestAddons/serial/GCPAuth/FakeCredentials (9.57s)

TestAddons/parallel/Registry (16.61s)

=== RUN   TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry

=== CONT  TestAddons/parallel/Registry
addons_test.go:382: registry stabilized in 7.809242ms
addons_test.go:384: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:352: "registry-6b586f9694-spx4n" [45d13e1c-e495-4c09-bebe-bf1b1178546f] Running
addons_test.go:384: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.003525621s
addons_test.go:387: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:352: "registry-proxy-clmzp" [1da43556-cb1f-4752-a2c5-af93985ca155] Running
addons_test.go:387: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.010678528s
addons_test.go:392: (dbg) Run:  kubectl --context addons-085189 delete po -l run=registry-test --now
addons_test.go:397: (dbg) Run:  kubectl --context addons-085189 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:397: (dbg) Done: kubectl --context addons-085189 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (5.646827098s)
addons_test.go:411: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 ip
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable registry --alsologtostderr -v=1
--- PASS: TestAddons/parallel/Registry (16.61s)

TestAddons/parallel/RegistryCreds (0.64s)

=== RUN   TestAddons/parallel/RegistryCreds
=== PAUSE TestAddons/parallel/RegistryCreds

=== CONT  TestAddons/parallel/RegistryCreds
addons_test.go:323: registry-creds stabilized in 29.755188ms
addons_test.go:325: (dbg) Run:  out/minikube-linux-amd64 addons configure registry-creds -f ./testdata/addons_testconfig.json -p addons-085189
addons_test.go:332: (dbg) Run:  kubectl --context addons-085189 -n kube-system get secret -o yaml
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable registry-creds --alsologtostderr -v=1
--- PASS: TestAddons/parallel/RegistryCreds (0.64s)

TestAddons/parallel/Ingress (23s)

=== RUN   TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress

=== CONT  TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run:  kubectl --context addons-085189 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run:  kubectl --context addons-085189 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run:  kubectl --context addons-085189 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:352: "nginx" [11b93a65-2f57-4dbb-99a0-e8e6d5d7f083] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:352: "nginx" [11b93a65-2f57-4dbb-99a0-e8e6d5d7f083] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 13.004537437s
I1123 08:00:39.615362   22148 kapi.go:150] Service nginx in namespace default found.
addons_test.go:264: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:288: (dbg) Run:  kubectl --context addons-085189 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 ip
addons_test.go:299: (dbg) Run:  nslookup hello-john.test 192.168.39.33
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable ingress --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable ingress --alsologtostderr -v=1: (7.718428587s)
--- PASS: TestAddons/parallel/Ingress (23.00s)

TestAddons/parallel/InspektorGadget (11.89s)

=== RUN   TestAddons/parallel/InspektorGadget
=== PAUSE TestAddons/parallel/InspektorGadget

=== CONT  TestAddons/parallel/InspektorGadget
addons_test.go:823: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ...
helpers_test.go:352: "gadget-nlzcf" [fa2acd55-24b4-4506-abef-74efce637d70] Running
addons_test.go:823: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 6.004020379s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable inspektor-gadget --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable inspektor-gadget --alsologtostderr -v=1: (5.881153723s)
--- PASS: TestAddons/parallel/InspektorGadget (11.89s)

TestAddons/parallel/MetricsServer (6.42s)

=== RUN   TestAddons/parallel/MetricsServer
=== PAUSE TestAddons/parallel/MetricsServer

=== CONT  TestAddons/parallel/MetricsServer
addons_test.go:455: metrics-server stabilized in 8.124218ms
addons_test.go:457: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ...
helpers_test.go:352: "metrics-server-85b7d694d7-zdqt8" [8cdab65e-662b-47a1-a112-61eaaf341c00] Running
addons_test.go:457: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.004863491s
addons_test.go:463: (dbg) Run:  kubectl --context addons-085189 top pods -n kube-system
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable metrics-server --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable metrics-server --alsologtostderr -v=1: (1.322698816s)
--- PASS: TestAddons/parallel/MetricsServer (6.42s)

TestAddons/parallel/CSI (38.99s)

=== RUN   TestAddons/parallel/CSI
=== PAUSE TestAddons/parallel/CSI

=== CONT  TestAddons/parallel/CSI
I1123 08:00:22.862427   22148 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I1123 08:00:22.873601   22148 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I1123 08:00:22.873626   22148 kapi.go:107] duration metric: took 11.213952ms to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
addons_test.go:549: csi-hostpath-driver pods stabilized in 11.223308ms
addons_test.go:552: (dbg) Run:  kubectl --context addons-085189 create -f testdata/csi-hostpath-driver/pvc.yaml
addons_test.go:557: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ...
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc -o jsonpath={.status.phase} -n default
addons_test.go:562: (dbg) Run:  kubectl --context addons-085189 create -f testdata/csi-hostpath-driver/pv-pod.yaml
addons_test.go:567: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ...
helpers_test.go:352: "task-pv-pod" [2fd49935-eac2-49ea-ab3b-bde1c027c5b8] Pending
helpers_test.go:352: "task-pv-pod" [2fd49935-eac2-49ea-ab3b-bde1c027c5b8] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:352: "task-pv-pod" [2fd49935-eac2-49ea-ab3b-bde1c027c5b8] Running
addons_test.go:567: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 10.003848052s
addons_test.go:572: (dbg) Run:  kubectl --context addons-085189 create -f testdata/csi-hostpath-driver/snapshot.yaml
addons_test.go:577: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ...
helpers_test.go:427: (dbg) Run:  kubectl --context addons-085189 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
helpers_test.go:435: TestAddons/parallel/CSI: WARNING: volume snapshot get for "default" "new-snapshot-demo" returned: 
helpers_test.go:427: (dbg) Run:  kubectl --context addons-085189 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default
addons_test.go:582: (dbg) Run:  kubectl --context addons-085189 delete pod task-pv-pod
addons_test.go:588: (dbg) Run:  kubectl --context addons-085189 delete pvc hpvc
addons_test.go:594: (dbg) Run:  kubectl --context addons-085189 create -f testdata/csi-hostpath-driver/pvc-restore.yaml
addons_test.go:599: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ...
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc hpvc-restore -o jsonpath={.status.phase} -n default
addons_test.go:604: (dbg) Run:  kubectl --context addons-085189 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml
addons_test.go:609: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ...
helpers_test.go:352: "task-pv-pod-restore" [550405e5-d933-4b58-ac5b-338f1e5dfc45] Pending
helpers_test.go:352: "task-pv-pod-restore" [550405e5-d933-4b58-ac5b-338f1e5dfc45] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container])
helpers_test.go:352: "task-pv-pod-restore" [550405e5-d933-4b58-ac5b-338f1e5dfc45] Running
addons_test.go:609: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 8.005091585s
addons_test.go:614: (dbg) Run:  kubectl --context addons-085189 delete pod task-pv-pod-restore
addons_test.go:614: (dbg) Done: kubectl --context addons-085189 delete pod task-pv-pod-restore: (1.005649829s)
addons_test.go:618: (dbg) Run:  kubectl --context addons-085189 delete pvc hpvc-restore
addons_test.go:622: (dbg) Run:  kubectl --context addons-085189 delete volumesnapshot new-snapshot-demo
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable volumesnapshots --alsologtostderr -v=1
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable csi-hostpath-driver --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable csi-hostpath-driver --alsologtostderr -v=1: (7.025692057s)
--- PASS: TestAddons/parallel/CSI (38.99s)

TestAddons/parallel/Headlamp (22.4s)

=== RUN   TestAddons/parallel/Headlamp
=== PAUSE TestAddons/parallel/Headlamp

=== CONT  TestAddons/parallel/Headlamp
addons_test.go:808: (dbg) Run:  out/minikube-linux-amd64 addons enable headlamp -p addons-085189 --alsologtostderr -v=1
addons_test.go:813: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ...
helpers_test.go:352: "headlamp-dfcdc64b-smbtn" [a31ee149-60ea-4c73-865f-7eae3e2f3080] Pending
helpers_test.go:352: "headlamp-dfcdc64b-smbtn" [a31ee149-60ea-4c73-865f-7eae3e2f3080] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:352: "headlamp-dfcdc64b-smbtn" [a31ee149-60ea-4c73-865f-7eae3e2f3080] Running / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp])
helpers_test.go:352: "headlamp-dfcdc64b-smbtn" [a31ee149-60ea-4c73-865f-7eae3e2f3080] Running
addons_test.go:813: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 15.00445395s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable headlamp --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable headlamp --alsologtostderr -v=1: (6.532497212s)
--- PASS: TestAddons/parallel/Headlamp (22.40s)

TestAddons/parallel/CloudSpanner (6.79s)

=== RUN   TestAddons/parallel/CloudSpanner
=== PAUSE TestAddons/parallel/CloudSpanner

=== CONT  TestAddons/parallel/CloudSpanner
addons_test.go:840: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ...
helpers_test.go:352: "cloud-spanner-emulator-5bdddb765-mkwhd" [23cba3d4-20a3-4e3f-8727-13cbb715b6eb] Running
addons_test.go:840: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 6.008838216s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable cloud-spanner --alsologtostderr -v=1
--- PASS: TestAddons/parallel/CloudSpanner (6.79s)

TestAddons/parallel/LocalPath (57.96s)

=== RUN   TestAddons/parallel/LocalPath
=== PAUSE TestAddons/parallel/LocalPath

=== CONT  TestAddons/parallel/LocalPath
addons_test.go:949: (dbg) Run:  kubectl --context addons-085189 apply -f testdata/storage-provisioner-rancher/pvc.yaml
addons_test.go:955: (dbg) Run:  kubectl --context addons-085189 apply -f testdata/storage-provisioner-rancher/pod.yaml
addons_test.go:959: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ...
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
helpers_test.go:402: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o jsonpath={.status.phase} -n default
addons_test.go:962: (dbg) TestAddons/parallel/LocalPath: waiting 3m0s for pods matching "run=test-local-path" in namespace "default" ...
helpers_test.go:352: "test-local-path" [e4431431-ee78-4a42-9d49-b4644a83d832] Pending
helpers_test.go:352: "test-local-path" [e4431431-ee78-4a42-9d49-b4644a83d832] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
2025/11/23 08:00:25 [DEBUG] GET http://192.168.39.33:5000
helpers_test.go:352: "test-local-path" [e4431431-ee78-4a42-9d49-b4644a83d832] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:352: "test-local-path" [e4431431-ee78-4a42-9d49-b4644a83d832] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
addons_test.go:962: (dbg) TestAddons/parallel/LocalPath: run=test-local-path healthy within 6.007011446s
addons_test.go:967: (dbg) Run:  kubectl --context addons-085189 get pvc test-pvc -o=json
addons_test.go:976: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 ssh "cat /opt/local-path-provisioner/pvc-622f42e9-c00b-4f40-800d-218cde12eb5c_default_test-pvc/file1"
addons_test.go:988: (dbg) Run:  kubectl --context addons-085189 delete pod test-local-path
addons_test.go:992: (dbg) Run:  kubectl --context addons-085189 delete pvc test-pvc
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable storage-provisioner-rancher --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (43.123135406s)
--- PASS: TestAddons/parallel/LocalPath (57.96s)

TestAddons/parallel/NvidiaDevicePlugin (6.67s)

=== RUN   TestAddons/parallel/NvidiaDevicePlugin
=== PAUSE TestAddons/parallel/NvidiaDevicePlugin

=== CONT  TestAddons/parallel/NvidiaDevicePlugin
addons_test.go:1025: (dbg) TestAddons/parallel/NvidiaDevicePlugin: waiting 6m0s for pods matching "name=nvidia-device-plugin-ds" in namespace "kube-system" ...
helpers_test.go:352: "nvidia-device-plugin-daemonset-pwmfs" [66ded332-d1ea-42e3-941c-46ff5b5f8b4a] Running
addons_test.go:1025: (dbg) TestAddons/parallel/NvidiaDevicePlugin: name=nvidia-device-plugin-ds healthy within 6.008311249s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable nvidia-device-plugin --alsologtostderr -v=1
--- PASS: TestAddons/parallel/NvidiaDevicePlugin (6.67s)

TestAddons/parallel/Yakd (12.74s)

=== RUN   TestAddons/parallel/Yakd
=== PAUSE TestAddons/parallel/Yakd

=== CONT  TestAddons/parallel/Yakd
addons_test.go:1047: (dbg) TestAddons/parallel/Yakd: waiting 2m0s for pods matching "app.kubernetes.io/name=yakd-dashboard" in namespace "yakd-dashboard" ...
helpers_test.go:352: "yakd-dashboard-5ff678cb9-rkqrw" [d9b3075c-629b-4eda-948a-6dea96ae22a7] Running
addons_test.go:1047: (dbg) TestAddons/parallel/Yakd: app.kubernetes.io/name=yakd-dashboard healthy within 6.004606132s
addons_test.go:1053: (dbg) Run:  out/minikube-linux-amd64 -p addons-085189 addons disable yakd --alsologtostderr -v=1
addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-085189 addons disable yakd --alsologtostderr -v=1: (6.734863711s)
--- PASS: TestAddons/parallel/Yakd (12.74s)

TestAddons/StoppedEnableDisable (14.32s)

=== RUN   TestAddons/StoppedEnableDisable
addons_test.go:172: (dbg) Run:  out/minikube-linux-amd64 stop -p addons-085189
addons_test.go:172: (dbg) Done: out/minikube-linux-amd64 stop -p addons-085189: (14.125463466s)
addons_test.go:176: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p addons-085189
addons_test.go:180: (dbg) Run:  out/minikube-linux-amd64 addons disable dashboard -p addons-085189
addons_test.go:185: (dbg) Run:  out/minikube-linux-amd64 addons disable gvisor -p addons-085189
--- PASS: TestAddons/StoppedEnableDisable (14.32s)

TestCertOptions (60.42s)

=== RUN   TestCertOptions
=== PAUSE TestCertOptions

=== CONT  TestCertOptions
cert_options_test.go:49: (dbg) Run:  out/minikube-linux-amd64 start -p cert-options-783136 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=kvm2 
E1123 08:45:59.016140   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
cert_options_test.go:49: (dbg) Done: out/minikube-linux-amd64 start -p cert-options-783136 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=kvm2 : (59.000767501s)
cert_options_test.go:60: (dbg) Run:  out/minikube-linux-amd64 -p cert-options-783136 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt"
cert_options_test.go:88: (dbg) Run:  kubectl --context cert-options-783136 config view
cert_options_test.go:100: (dbg) Run:  out/minikube-linux-amd64 ssh -p cert-options-783136 -- "sudo cat /etc/kubernetes/admin.conf"
helpers_test.go:175: Cleaning up "cert-options-783136" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p cert-options-783136
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p cert-options-783136: (1.03954605s)
--- PASS: TestCertOptions (60.42s)

TestCertExpiration (316.83s)

=== RUN   TestCertExpiration
=== PAUSE TestCertExpiration

=== CONT  TestCertExpiration
cert_options_test.go:123: (dbg) Run:  out/minikube-linux-amd64 start -p cert-expiration-494771 --memory=3072 --cert-expiration=3m --driver=kvm2 
cert_options_test.go:123: (dbg) Done: out/minikube-linux-amd64 start -p cert-expiration-494771 --memory=3072 --cert-expiration=3m --driver=kvm2 : (1m2.035943754s)
cert_options_test.go:131: (dbg) Run:  out/minikube-linux-amd64 start -p cert-expiration-494771 --memory=3072 --cert-expiration=8760h --driver=kvm2 
cert_options_test.go:131: (dbg) Done: out/minikube-linux-amd64 start -p cert-expiration-494771 --memory=3072 --cert-expiration=8760h --driver=kvm2 : (1m13.899796852s)
helpers_test.go:175: Cleaning up "cert-expiration-494771" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p cert-expiration-494771
--- PASS: TestCertExpiration (316.83s)

TestDockerFlags (59.65s)

=== RUN   TestDockerFlags
=== PAUSE TestDockerFlags

=== CONT  TestDockerFlags
docker_test.go:51: (dbg) Run:  out/minikube-linux-amd64 start -p docker-flags-773217 --cache-images=false --memory=3072 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=kvm2 
docker_test.go:51: (dbg) Done: out/minikube-linux-amd64 start -p docker-flags-773217 --cache-images=false --memory=3072 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=kvm2 : (58.158350249s)
docker_test.go:56: (dbg) Run:  out/minikube-linux-amd64 -p docker-flags-773217 ssh "sudo systemctl show docker --property=Environment --no-pager"
docker_test.go:67: (dbg) Run:  out/minikube-linux-amd64 -p docker-flags-773217 ssh "sudo systemctl show docker --property=ExecStart --no-pager"
helpers_test.go:175: Cleaning up "docker-flags-773217" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p docker-flags-773217
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p docker-flags-773217: (1.066409325s)
--- PASS: TestDockerFlags (59.65s)

TestForceSystemdFlag (69.53s)

=== RUN   TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag

=== CONT  TestForceSystemdFlag
docker_test.go:91: (dbg) Run:  out/minikube-linux-amd64 start -p force-systemd-flag-910640 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=kvm2 
E1123 08:46:58.729390   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:58.735876   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:58.747346   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:58.768824   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:58.810303   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:58.891853   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:59.053680   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:46:59.375793   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:47:00.018073   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:47:01.299798   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:47:03.861866   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:47:08.983464   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:47:19.224956   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
docker_test.go:91: (dbg) Done: out/minikube-linux-amd64 start -p force-systemd-flag-910640 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=kvm2 : (1m8.317906822s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-amd64 -p force-systemd-flag-910640 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-flag-910640" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p force-systemd-flag-910640
--- PASS: TestForceSystemdFlag (69.53s)

TestForceSystemdEnv (51.49s)

=== RUN   TestForceSystemdEnv
=== PAUSE TestForceSystemdEnv

=== CONT  TestForceSystemdEnv
docker_test.go:155: (dbg) Run:  out/minikube-linux-amd64 start -p force-systemd-env-630963 --memory=3072 --alsologtostderr -v=5 --driver=kvm2 
docker_test.go:155: (dbg) Done: out/minikube-linux-amd64 start -p force-systemd-env-630963 --memory=3072 --alsologtostderr -v=5 --driver=kvm2 : (50.207881115s)
docker_test.go:110: (dbg) Run:  out/minikube-linux-amd64 -p force-systemd-env-630963 ssh "docker info --format {{.CgroupDriver}}"
helpers_test.go:175: Cleaning up "force-systemd-env-630963" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p force-systemd-env-630963
--- PASS: TestForceSystemdEnv (51.49s)

TestErrorSpam/setup (43.06s)

=== RUN   TestErrorSpam/setup
error_spam_test.go:81: (dbg) Run:  out/minikube-linux-amd64 start -p nospam-883619 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-883619 --driver=kvm2 
error_spam_test.go:81: (dbg) Done: out/minikube-linux-amd64 start -p nospam-883619 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-883619 --driver=kvm2 : (43.055743426s)
--- PASS: TestErrorSpam/setup (43.06s)

TestErrorSpam/start (0.33s)

=== RUN   TestErrorSpam/start
error_spam_test.go:206: Cleaning up 1 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 start --dry-run
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 start --dry-run
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 start --dry-run
--- PASS: TestErrorSpam/start (0.33s)

TestErrorSpam/status (0.69s)

=== RUN   TestErrorSpam/status
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 status
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 status
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 status
--- PASS: TestErrorSpam/status (0.69s)

TestErrorSpam/pause (1.29s)

=== RUN   TestErrorSpam/pause
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 pause
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 pause
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 pause
--- PASS: TestErrorSpam/pause (1.29s)

TestErrorSpam/unpause (1.58s)

=== RUN   TestErrorSpam/unpause
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 unpause
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 unpause
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 unpause
--- PASS: TestErrorSpam/unpause (1.58s)

TestErrorSpam/stop (16.62s)

=== RUN   TestErrorSpam/stop
error_spam_test.go:206: Cleaning up 0 logfile(s) ...
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 stop
error_spam_test.go:149: (dbg) Done: out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 stop: (13.108885134s)
error_spam_test.go:149: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 stop
error_spam_test.go:149: (dbg) Done: out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 stop: (1.454799381s)
error_spam_test.go:172: (dbg) Run:  out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 stop
error_spam_test.go:172: (dbg) Done: out/minikube-linux-amd64 -p nospam-883619 --log_dir /tmp/nospam-883619 stop: (2.057932786s)
--- PASS: TestErrorSpam/stop (16.62s)

TestFunctional/serial/CopySyncFile (0s)

=== RUN   TestFunctional/serial/CopySyncFile
functional_test.go:1860: local sync path: /home/jenkins/minikube-integration/21966-18241/.minikube/files/etc/test/nested/copy/22148/hosts
--- PASS: TestFunctional/serial/CopySyncFile (0.00s)

TestFunctional/serial/StartWithProxy (83.84s)

=== RUN   TestFunctional/serial/StartWithProxy
functional_test.go:2239: (dbg) Run:  out/minikube-linux-amd64 start -p functional-086932 --memory=4096 --apiserver-port=8441 --wait=all --driver=kvm2 
functional_test.go:2239: (dbg) Done: out/minikube-linux-amd64 start -p functional-086932 --memory=4096 --apiserver-port=8441 --wait=all --driver=kvm2 : (1m23.836669952s)
--- PASS: TestFunctional/serial/StartWithProxy (83.84s)

TestFunctional/serial/AuditLog (0s)

=== RUN   TestFunctional/serial/AuditLog
--- PASS: TestFunctional/serial/AuditLog (0.00s)

TestFunctional/serial/SoftStart (54.35s)

=== RUN   TestFunctional/serial/SoftStart
I1123 08:03:56.721947   22148 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
functional_test.go:674: (dbg) Run:  out/minikube-linux-amd64 start -p functional-086932 --alsologtostderr -v=8
E1123 08:04:07.500261   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:07.506656   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:07.518115   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:07.539584   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:07.580999   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:07.662459   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:07.823982   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:08.145681   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:08.787778   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:10.069433   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:12.632367   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:17.753917   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:27.995760   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:04:48.477215   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:674: (dbg) Done: out/minikube-linux-amd64 start -p functional-086932 --alsologtostderr -v=8: (54.344641822s)
functional_test.go:678: soft start took 54.345373374s for "functional-086932" cluster.
I1123 08:04:51.066982   22148 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestFunctional/serial/SoftStart (54.35s)

TestFunctional/serial/KubeContext (0.04s)
=== RUN   TestFunctional/serial/KubeContext
functional_test.go:696: (dbg) Run:  kubectl config current-context
--- PASS: TestFunctional/serial/KubeContext (0.04s)

TestFunctional/serial/KubectlGetPods (0.09s)
=== RUN   TestFunctional/serial/KubectlGetPods
functional_test.go:711: (dbg) Run:  kubectl --context functional-086932 get po -A
--- PASS: TestFunctional/serial/KubectlGetPods (0.09s)

TestFunctional/serial/CacheCmd/cache/add_remote (2.18s)
=== RUN   TestFunctional/serial/CacheCmd/cache/add_remote
functional_test.go:1064: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cache add registry.k8s.io/pause:3.1
functional_test.go:1064: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cache add registry.k8s.io/pause:3.3
functional_test.go:1064: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cache add registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (2.18s)

TestFunctional/serial/CacheCmd/cache/add_local (1.3s)
=== RUN   TestFunctional/serial/CacheCmd/cache/add_local
functional_test.go:1092: (dbg) Run:  docker build -t minikube-local-cache-test:functional-086932 /tmp/TestFunctionalserialCacheCmdcacheadd_local2173208837/001
functional_test.go:1104: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cache add minikube-local-cache-test:functional-086932
functional_test.go:1109: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cache delete minikube-local-cache-test:functional-086932
functional_test.go:1098: (dbg) Run:  docker rmi minikube-local-cache-test:functional-086932
--- PASS: TestFunctional/serial/CacheCmd/cache/add_local (1.30s)

TestFunctional/serial/CacheCmd/cache/CacheDelete (0.06s)
=== RUN   TestFunctional/serial/CacheCmd/cache/CacheDelete
functional_test.go:1117: (dbg) Run:  out/minikube-linux-amd64 cache delete registry.k8s.io/pause:3.3
--- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.06s)

TestFunctional/serial/CacheCmd/cache/list (0.06s)
=== RUN   TestFunctional/serial/CacheCmd/cache/list
functional_test.go:1125: (dbg) Run:  out/minikube-linux-amd64 cache list
--- PASS: TestFunctional/serial/CacheCmd/cache/list (0.06s)

TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.18s)
=== RUN   TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node
functional_test.go:1139: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh sudo crictl images
--- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.18s)

TestFunctional/serial/CacheCmd/cache/cache_reload (1.04s)
=== RUN   TestFunctional/serial/CacheCmd/cache/cache_reload
functional_test.go:1162: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh sudo docker rmi registry.k8s.io/pause:latest
functional_test.go:1168: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh sudo crictl inspecti registry.k8s.io/pause:latest
functional_test.go:1168: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (175.323917ms)

-- stdout --
	FATA[0000] no such image "registry.k8s.io/pause:latest" present 

-- /stdout --
** stderr ** 
	ssh: Process exited with status 1

** /stderr **
functional_test.go:1173: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cache reload
functional_test.go:1178: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh sudo crictl inspecti registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (1.04s)
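The reload sequence above can be reproduced by hand. A minimal sketch, assuming the installed minikube binary stands in for out/minikube-linux-amd64 and the profile and runtime match this run (functional-086932, Docker):
  # remove the image inside the node, confirm it is gone, then restore it from the host-side cache
  minikube -p functional-086932 ssh sudo docker rmi registry.k8s.io/pause:latest
  minikube -p functional-086932 ssh sudo crictl inspecti registry.k8s.io/pause:latest   # exits 1: image absent
  minikube -p functional-086932 cache reload
  minikube -p functional-086932 ssh sudo crictl inspecti registry.k8s.io/pause:latest   # succeeds after reload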

TestFunctional/serial/CacheCmd/cache/delete (0.12s)
=== RUN   TestFunctional/serial/CacheCmd/cache/delete
functional_test.go:1187: (dbg) Run:  out/minikube-linux-amd64 cache delete registry.k8s.io/pause:3.1
functional_test.go:1187: (dbg) Run:  out/minikube-linux-amd64 cache delete registry.k8s.io/pause:latest
--- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.12s)

TestFunctional/serial/MinikubeKubectlCmd (0.12s)
=== RUN   TestFunctional/serial/MinikubeKubectlCmd
functional_test.go:731: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 kubectl -- --context functional-086932 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.12s)

TestFunctional/serial/MinikubeKubectlCmdDirectly (0.11s)
=== RUN   TestFunctional/serial/MinikubeKubectlCmdDirectly
functional_test.go:756: (dbg) Run:  out/kubectl --context functional-086932 get pods
--- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.11s)

TestFunctional/serial/ExtraConfig (55.76s)
=== RUN   TestFunctional/serial/ExtraConfig
functional_test.go:772: (dbg) Run:  out/minikube-linux-amd64 start -p functional-086932 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all
E1123 08:05:29.440496   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
functional_test.go:772: (dbg) Done: out/minikube-linux-amd64 start -p functional-086932 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: (55.761405698s)
functional_test.go:776: restart took 55.761535256s for "functional-086932" cluster.
I1123 08:05:52.131300   22148 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestFunctional/serial/ExtraConfig (55.76s)

TestFunctional/serial/ComponentHealth (0.07s)
=== RUN   TestFunctional/serial/ComponentHealth
functional_test.go:825: (dbg) Run:  kubectl --context functional-086932 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:840: etcd phase: Running
functional_test.go:850: etcd status: Ready
functional_test.go:840: kube-apiserver phase: Running
functional_test.go:850: kube-apiserver status: Ready
functional_test.go:840: kube-controller-manager phase: Running
functional_test.go:850: kube-controller-manager status: Ready
functional_test.go:840: kube-scheduler phase: Running
functional_test.go:850: kube-scheduler status: Ready
--- PASS: TestFunctional/serial/ComponentHealth (0.07s)

TestFunctional/serial/LogsCmd (1.06s)
=== RUN   TestFunctional/serial/LogsCmd
functional_test.go:1251: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 logs
functional_test.go:1251: (dbg) Done: out/minikube-linux-amd64 -p functional-086932 logs: (1.060035445s)
--- PASS: TestFunctional/serial/LogsCmd (1.06s)

TestFunctional/serial/LogsFileCmd (1.02s)
=== RUN   TestFunctional/serial/LogsFileCmd
functional_test.go:1265: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 logs --file /tmp/TestFunctionalserialLogsFileCmd3946088742/001/logs.txt
functional_test.go:1265: (dbg) Done: out/minikube-linux-amd64 -p functional-086932 logs --file /tmp/TestFunctionalserialLogsFileCmd3946088742/001/logs.txt: (1.015079128s)
--- PASS: TestFunctional/serial/LogsFileCmd (1.02s)

TestFunctional/serial/InvalidService (3.91s)
=== RUN   TestFunctional/serial/InvalidService
functional_test.go:2326: (dbg) Run:  kubectl --context functional-086932 apply -f testdata/invalidsvc.yaml
functional_test.go:2340: (dbg) Run:  out/minikube-linux-amd64 service invalid-svc -p functional-086932
functional_test.go:2340: (dbg) Non-zero exit: out/minikube-linux-amd64 service invalid-svc -p functional-086932: exit status 115 (230.699775ms)

-- stdout --
	┌───────────┬─────────────┬─────────────┬─────────────────────────────┐
	│ NAMESPACE │    NAME     │ TARGET PORT │             URL             │
	├───────────┼─────────────┼─────────────┼─────────────────────────────┤
	│ default   │ invalid-svc │ 80          │ http://192.168.39.224:31598 │
	└───────────┴─────────────┴─────────────┴─────────────────────────────┘
	
	

-- /stdout --
** stderr ** 
	X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service invalid-svc found
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_service_96b204199e3191fa1740d4430b018a3c8028d52d_0.log                 │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

** /stderr **
functional_test.go:2332: (dbg) Run:  kubectl --context functional-086932 delete -f testdata/invalidsvc.yaml
--- PASS: TestFunctional/serial/InvalidService (3.91s)

TestFunctional/parallel/ConfigCmd (0.4s)
=== RUN   TestFunctional/parallel/ConfigCmd
=== PAUSE TestFunctional/parallel/ConfigCmd

=== CONT  TestFunctional/parallel/ConfigCmd
functional_test.go:1214: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 config unset cpus
functional_test.go:1214: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 config get cpus
functional_test.go:1214: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 config get cpus: exit status 14 (63.580214ms)

** stderr ** 
	Error: specified key could not be found in config

** /stderr **
functional_test.go:1214: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 config set cpus 2
functional_test.go:1214: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 config get cpus
functional_test.go:1214: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 config unset cpus
functional_test.go:1214: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 config get cpus
functional_test.go:1214: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 config get cpus: exit status 14 (69.305047ms)

** stderr ** 
	Error: specified key could not be found in config

** /stderr **
--- PASS: TestFunctional/parallel/ConfigCmd (0.40s)
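For reference, the config round-trip exercised here as a minimal sketch (assuming the installed minikube binary in place of out/minikube-linux-amd64). Running "config get" on an unset key is the expected failure path: it prints "Error: specified key could not be found in config" and exits with status 14.
  minikube -p functional-086932 config set cpus 2
  minikube -p functional-086932 config get cpus     # prints 2
  minikube -p functional-086932 config unset cpus
  minikube -p functional-086932 config get cpus     # exits 14: key not found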

TestFunctional/parallel/DashboardCmd (42.63s)
=== RUN   TestFunctional/parallel/DashboardCmd
=== PAUSE TestFunctional/parallel/DashboardCmd

=== CONT  TestFunctional/parallel/DashboardCmd
functional_test.go:920: (dbg) daemon: [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-086932 --alsologtostderr -v=1]
functional_test.go:925: (dbg) stopping [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-086932 --alsologtostderr -v=1] ...
helpers_test.go:525: unable to kill pid 27974: os: process already finished
--- PASS: TestFunctional/parallel/DashboardCmd (42.63s)

TestFunctional/parallel/DryRun (0.24s)
=== RUN   TestFunctional/parallel/DryRun
=== PAUSE TestFunctional/parallel/DryRun

=== CONT  TestFunctional/parallel/DryRun
functional_test.go:989: (dbg) Run:  out/minikube-linux-amd64 start -p functional-086932 --dry-run --memory 250MB --alsologtostderr --driver=kvm2 
functional_test.go:989: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-086932 --dry-run --memory 250MB --alsologtostderr --driver=kvm2 : exit status 23 (112.852994ms)

-- stdout --
	* [functional-086932] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=21966
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Using the kvm2 driver based on existing profile
	
	

-- /stdout --
** stderr ** 
	I1123 08:06:10.773078   27900 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:06:10.773338   27900 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:06:10.773348   27900 out.go:374] Setting ErrFile to fd 2...
	I1123 08:06:10.773355   27900 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:06:10.773562   27900 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:06:10.774002   27900 out.go:368] Setting JSON to false
	I1123 08:06:10.774819   27900 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":2920,"bootTime":1763882251,"procs":175,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I1123 08:06:10.774873   27900 start.go:143] virtualization: kvm guest
	I1123 08:06:10.776683   27900 out.go:179] * [functional-086932] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	I1123 08:06:10.777857   27900 notify.go:221] Checking for updates...
	I1123 08:06:10.777883   27900 out.go:179]   - MINIKUBE_LOCATION=21966
	I1123 08:06:10.779291   27900 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1123 08:06:10.780684   27900 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:06:10.785448   27900 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:06:10.786828   27900 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I1123 08:06:10.788279   27900 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I1123 08:06:10.790015   27900 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:06:10.790487   27900 driver.go:422] Setting default libvirt URI to qemu:///system
	I1123 08:06:10.824405   27900 out.go:179] * Using the kvm2 driver based on existing profile
	I1123 08:06:10.825567   27900 start.go:309] selected driver: kvm2
	I1123 08:06:10.825589   27900 start.go:927] validating driver "kvm2" against &{Name:functional-086932 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{Kuber
netesVersion:v1.34.1 ClusterName:functional-086932 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.224 Port:8441 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0
s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:06:10.825675   27900 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1123 08:06:10.827909   27900 out.go:203] 
	W1123 08:06:10.829186   27900 out.go:285] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB
	I1123 08:06:10.830263   27900 out.go:203] 

** /stderr **
functional_test.go:1006: (dbg) Run:  out/minikube-linux-amd64 start -p functional-086932 --dry-run --alsologtostderr -v=1 --driver=kvm2 
--- PASS: TestFunctional/parallel/DryRun (0.24s)
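The non-zero exit above is the intended validation path: a 250MB request is below the 1800MB usable minimum, so start aborts with status 23 (RSRC_INSUFFICIENT_REQ_MEMORY) without modifying the existing profile. A minimal sketch, assuming the installed minikube binary:
  minikube start -p functional-086932 --dry-run --memory 250MB --driver=kvm2 ; echo $?   # 23
  minikube start -p functional-086932 --dry-run --driver=kvm2                            # passes validation, makes no changes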

TestFunctional/parallel/InternationalLanguage (0.13s)
=== RUN   TestFunctional/parallel/InternationalLanguage
=== PAUSE TestFunctional/parallel/InternationalLanguage

=== CONT  TestFunctional/parallel/InternationalLanguage
functional_test.go:1035: (dbg) Run:  out/minikube-linux-amd64 start -p functional-086932 --dry-run --memory 250MB --alsologtostderr --driver=kvm2 
functional_test.go:1035: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-086932 --dry-run --memory 250MB --alsologtostderr --driver=kvm2 : exit status 23 (127.299288ms)

-- stdout --
	* [functional-086932] minikube v1.37.0 sur Ubuntu 22.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=21966
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	* Utilisation du pilote kvm2 basé sur le profil existant
	
	

-- /stdout --
** stderr ** 
	I1123 08:06:11.027529   27932 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:06:11.027661   27932 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:06:11.027671   27932 out.go:374] Setting ErrFile to fd 2...
	I1123 08:06:11.027677   27932 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:06:11.027973   27932 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:06:11.028415   27932 out.go:368] Setting JSON to false
	I1123 08:06:11.029257   27932 start.go:133] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":2920,"bootTime":1763882251,"procs":175,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1044-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
	I1123 08:06:11.029320   27932 start.go:143] virtualization: kvm guest
	I1123 08:06:11.031285   27932 out.go:179] * [functional-086932] minikube v1.37.0 sur Ubuntu 22.04 (kvm/amd64)
	I1123 08:06:11.032582   27932 out.go:179]   - MINIKUBE_LOCATION=21966
	I1123 08:06:11.032599   27932 notify.go:221] Checking for updates...
	I1123 08:06:11.034962   27932 out.go:179]   - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	I1123 08:06:11.036253   27932 out.go:179]   - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	I1123 08:06:11.037518   27932 out.go:179]   - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	I1123 08:06:11.038697   27932 out.go:179]   - MINIKUBE_BIN=out/minikube-linux-amd64
	I1123 08:06:11.039822   27932 out.go:179]   - MINIKUBE_FORCE_SYSTEMD=
	I1123 08:06:11.041570   27932 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:06:11.042188   27932 driver.go:422] Setting default libvirt URI to qemu:///system
	I1123 08:06:11.076340   27932 out.go:179] * Utilisation du pilote kvm2 basé sur le profil existant
	I1123 08:06:11.077504   27932 start.go:309] selected driver: kvm2
	I1123 08:06:11.077519   27932 start.go:927] validating driver "kvm2" against &{Name:functional-086932 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/21924/minikube-v1.37.0-1763503576-21924-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1763789673-21948@sha256:bb10ebd3ca086eea12c038085866fb2f6cfa67385dcb830c4deb5e36ced6b53f Memory:4096 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{Kuber
netesVersion:v1.34.1 ClusterName:functional-086932 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.224 Port:8441 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0
s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
	I1123 08:06:11.077603   27932 start.go:938] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
	I1123 08:06:11.080054   27932 out.go:203] 
	W1123 08:06:11.081420   27932 out.go:285] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo
	I1123 08:06:11.082743   27932 out.go:203] 

** /stderr **
--- PASS: TestFunctional/parallel/InternationalLanguage (0.13s)

TestFunctional/parallel/StatusCmd (0.68s)
=== RUN   TestFunctional/parallel/StatusCmd
=== PAUSE TestFunctional/parallel/StatusCmd

=== CONT  TestFunctional/parallel/StatusCmd
functional_test.go:869: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 status
functional_test.go:875: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}
functional_test.go:887: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 status -o json
--- PASS: TestFunctional/parallel/StatusCmd (0.68s)

TestFunctional/parallel/ServiceCmdConnect (9.5s)
=== RUN   TestFunctional/parallel/ServiceCmdConnect
=== PAUSE TestFunctional/parallel/ServiceCmdConnect

=== CONT  TestFunctional/parallel/ServiceCmdConnect
functional_test.go:1636: (dbg) Run:  kubectl --context functional-086932 create deployment hello-node-connect --image kicbase/echo-server
functional_test.go:1640: (dbg) Run:  kubectl --context functional-086932 expose deployment hello-node-connect --type=NodePort --port=8080
functional_test.go:1645: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ...
helpers_test.go:352: "hello-node-connect-7d85dfc575-nlmmt" [7d6bfa6f-7018-4d09-a38b-aec4296610a2] Pending / Ready:ContainersNotReady (containers with unready status: [echo-server]) / ContainersReady:ContainersNotReady (containers with unready status: [echo-server])
helpers_test.go:352: "hello-node-connect-7d85dfc575-nlmmt" [7d6bfa6f-7018-4d09-a38b-aec4296610a2] Running
functional_test.go:1645: (dbg) TestFunctional/parallel/ServiceCmdConnect: app=hello-node-connect healthy within 9.005280562s
functional_test.go:1654: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 service hello-node-connect --url
functional_test.go:1660: found endpoint for hello-node-connect: http://192.168.39.224:30950
functional_test.go:1680: http://192.168.39.224:30950: success! body:
Request served by hello-node-connect-7d85dfc575-nlmmt

HTTP/1.1 GET /

Host: 192.168.39.224:30950
Accept-Encoding: gzip
User-Agent: Go-http-client/1.1
--- PASS: TestFunctional/parallel/ServiceCmdConnect (9.50s)
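The check above amounts to exposing a NodePort service and fetching the URL minikube reports. A minimal sketch, assuming kubectl and the installed minikube binary, with curl standing in for the HTTP GET the test issues from Go:
  kubectl --context functional-086932 create deployment hello-node-connect --image kicbase/echo-server
  kubectl --context functional-086932 expose deployment hello-node-connect --type=NodePort --port=8080
  url=$(minikube -p functional-086932 service hello-node-connect --url)   # e.g. http://192.168.39.224:30950
  curl "$url"                                                             # echo-server reports the request it served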

TestFunctional/parallel/AddonsCmd (0.16s)
=== RUN   TestFunctional/parallel/AddonsCmd
=== PAUSE TestFunctional/parallel/AddonsCmd

=== CONT  TestFunctional/parallel/AddonsCmd
functional_test.go:1695: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 addons list
functional_test.go:1707: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 addons list -o json
--- PASS: TestFunctional/parallel/AddonsCmd (0.16s)

TestFunctional/parallel/PersistentVolumeClaim (54.83s)
=== RUN   TestFunctional/parallel/PersistentVolumeClaim
=== PAUSE TestFunctional/parallel/PersistentVolumeClaim

=== CONT  TestFunctional/parallel/PersistentVolumeClaim
functional_test_pvc_test.go:50: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ...
helpers_test.go:352: "storage-provisioner" [aa8a96a7-55e3-4fb5-8e10-d8ff9d9de1b7] Running
functional_test_pvc_test.go:50: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 6.005686217s
functional_test_pvc_test.go:55: (dbg) Run:  kubectl --context functional-086932 get storageclass -o=json
functional_test_pvc_test.go:75: (dbg) Run:  kubectl --context functional-086932 apply -f testdata/storage-provisioner/pvc.yaml
functional_test_pvc_test.go:82: (dbg) Run:  kubectl --context functional-086932 get pvc myclaim -o=json
I1123 08:06:05.793789   22148 retry.go:31] will retry after 2.549011413s: testpvc phase = "Pending", want "Bound" (msg={TypeMeta:{Kind:PersistentVolumeClaim APIVersion:v1} ObjectMeta:{Name:myclaim GenerateName: Namespace:default SelfLink: UID:676d1ad1-8a68-4d15-8193-1cc67edf2ee4 ResourceVersion:754 Generation:0 CreationTimestamp:2025-11-23 08:06:05 +0000 UTC DeletionTimestamp:<nil> DeletionGracePeriodSeconds:<nil> Labels:map[] Annotations:map[kubectl.kubernetes.io/last-applied-configuration:{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"myclaim","namespace":"default"},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"500Mi"}},"volumeMode":"Filesystem"}}
volume.beta.kubernetes.io/storage-provisioner:k8s.io/minikube-hostpath volume.kubernetes.io/storage-provisioner:k8s.io/minikube-hostpath] OwnerReferences:[] Finalizers:[kubernetes.io/pvc-protection] ManagedFields:[]} Spec:{AccessModes:[ReadWriteOnce] Selector:nil Resources:{Limits:map[] Requests:map[storage:{i:{value:524288000 scale:0} d:{Dec:<nil>} s:500Mi Format:BinarySI}]} VolumeName: StorageClassName:0xc0019cb600 VolumeMode:0xc0019cb630 DataSource:nil DataSourceRef:nil VolumeAttributesClassName:<nil>} Status:{Phase:Pending AccessModes:[] Capacity:map[] Conditions:[] AllocatedResources:map[] AllocatedResourceStatuses:map[] CurrentVolumeAttributesClassName:<nil> ModifyVolumeStatus:nil}})
functional_test_pvc_test.go:82: (dbg) Run:  kubectl --context functional-086932 get pvc myclaim -o=json
functional_test_pvc_test.go:131: (dbg) Run:  kubectl --context functional-086932 apply -f testdata/storage-provisioner/pod.yaml
I1123 08:06:08.531968   22148 detect.go:223] nested VM detected
functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 6m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:352: "sp-pod" [ff346e21-fee6-4468-8f6e-93959093eb29] Pending
helpers_test.go:352: "sp-pod" [ff346e21-fee6-4468-8f6e-93959093eb29] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:352: "sp-pod" [ff346e21-fee6-4468-8f6e-93959093eb29] Running
functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 14.006021861s
functional_test_pvc_test.go:106: (dbg) Run:  kubectl --context functional-086932 exec sp-pod -- touch /tmp/mount/foo
functional_test_pvc_test.go:112: (dbg) Run:  kubectl --context functional-086932 delete -f testdata/storage-provisioner/pod.yaml
functional_test_pvc_test.go:112: (dbg) Done: kubectl --context functional-086932 delete -f testdata/storage-provisioner/pod.yaml: (2.28314313s)
functional_test_pvc_test.go:131: (dbg) Run:  kubectl --context functional-086932 apply -f testdata/storage-provisioner/pod.yaml
I1123 08:06:25.210944   22148 detect.go:223] nested VM detected
functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 6m0s for pods matching "test=storage-provisioner" in namespace "default" ...
helpers_test.go:352: "sp-pod" [eac69851-37cf-49d3-af89-e9acaf9a34bb] Pending
helpers_test.go:352: "sp-pod" [eac69851-37cf-49d3-af89-e9acaf9a34bb] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend])
helpers_test.go:352: "sp-pod" [eac69851-37cf-49d3-af89-e9acaf9a34bb] Running
functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 29.004985667s
functional_test_pvc_test.go:120: (dbg) Run:  kubectl --context functional-086932 exec sp-pod -- ls /tmp/mount
--- PASS: TestFunctional/parallel/PersistentVolumeClaim (54.83s)
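The claim-and-pod flow above verifies that data written through the PVC survives pod replacement. A minimal sketch using the same testdata manifests from the minikube repository (per the log, pod.yaml mounts the claim at /tmp/mount):
  kubectl --context functional-086932 apply -f testdata/storage-provisioner/pvc.yaml
  kubectl --context functional-086932 apply -f testdata/storage-provisioner/pod.yaml
  kubectl --context functional-086932 exec sp-pod -- touch /tmp/mount/foo
  kubectl --context functional-086932 delete -f testdata/storage-provisioner/pod.yaml
  kubectl --context functional-086932 apply -f testdata/storage-provisioner/pod.yaml
  kubectl --context functional-086932 exec sp-pod -- ls /tmp/mount   # foo is still present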

TestFunctional/parallel/SSHCmd (0.36s)
=== RUN   TestFunctional/parallel/SSHCmd
=== PAUSE TestFunctional/parallel/SSHCmd

=== CONT  TestFunctional/parallel/SSHCmd
functional_test.go:1730: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "echo hello"
functional_test.go:1747: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "cat /etc/hostname"
--- PASS: TestFunctional/parallel/SSHCmd (0.36s)

TestFunctional/parallel/CpCmd (1.21s)
=== RUN   TestFunctional/parallel/CpCmd
=== PAUSE TestFunctional/parallel/CpCmd

=== CONT  TestFunctional/parallel/CpCmd
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cp testdata/cp-test.txt /home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh -n functional-086932 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cp functional-086932:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd2579216515/001/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh -n functional-086932 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 cp testdata/cp-test.txt /tmp/does/not/exist/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh -n functional-086932 "sudo cat /tmp/does/not/exist/cp-test.txt"
--- PASS: TestFunctional/parallel/CpCmd (1.21s)

TestFunctional/parallel/MySQL (39.87s)
=== RUN   TestFunctional/parallel/MySQL
=== PAUSE TestFunctional/parallel/MySQL

=== CONT  TestFunctional/parallel/MySQL
functional_test.go:1798: (dbg) Run:  kubectl --context functional-086932 replace --force -f testdata/mysql.yaml
functional_test.go:1804: (dbg) TestFunctional/parallel/MySQL: waiting 10m0s for pods matching "app=mysql" in namespace "default" ...
helpers_test.go:352: "mysql-5bb876957f-kkm88" [3b348b77-79b2-450b-84d4-d700dd4d51e8] Pending / Ready:ContainersNotReady (containers with unready status: [mysql]) / ContainersReady:ContainersNotReady (containers with unready status: [mysql])
helpers_test.go:352: "mysql-5bb876957f-kkm88" [3b348b77-79b2-450b-84d4-d700dd4d51e8] Running
functional_test.go:1804: (dbg) TestFunctional/parallel/MySQL: app=mysql healthy within 34.030171822s
functional_test.go:1812: (dbg) Run:  kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;"
functional_test.go:1812: (dbg) Non-zero exit: kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;": exit status 1 (191.209045ms)

** stderr ** 
	mysql: [Warning] Using a password on the command line interface can be insecure.
	ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
	command terminated with exit code 1

** /stderr **
I1123 08:06:43.687954   22148 retry.go:31] will retry after 1.325926935s: exit status 1
functional_test.go:1812: (dbg) Run:  kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;"
functional_test.go:1812: (dbg) Non-zero exit: kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;": exit status 1 (230.252112ms)

** stderr ** 
	mysql: [Warning] Using a password on the command line interface can be insecure.
	ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
	command terminated with exit code 1

** /stderr **
I1123 08:06:45.244646   22148 retry.go:31] will retry after 1.711774098s: exit status 1
functional_test.go:1812: (dbg) Run:  kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;"
functional_test.go:1812: (dbg) Non-zero exit: kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;": exit status 1 (139.690821ms)

** stderr ** 
	mysql: [Warning] Using a password on the command line interface can be insecure.
	ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2)
	command terminated with exit code 1

** /stderr **
I1123 08:06:47.096881   22148 retry.go:31] will retry after 1.887284424s: exit status 1
functional_test.go:1812: (dbg) Run:  kubectl --context functional-086932 exec mysql-5bb876957f-kkm88 -- mysql -ppassword -e "show databases;"
E1123 08:06:51.362920   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
2025/11/23 08:06:53 [DEBUG] GET http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
--- PASS: TestFunctional/parallel/MySQL (39.87s)

TestFunctional/parallel/FileSync (0.19s)
=== RUN   TestFunctional/parallel/FileSync
=== PAUSE TestFunctional/parallel/FileSync

=== CONT  TestFunctional/parallel/FileSync
functional_test.go:1934: Checking for existence of /etc/test/nested/copy/22148/hosts within VM
functional_test.go:1936: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /etc/test/nested/copy/22148/hosts"
functional_test.go:1941: file sync test content: Test file for checking file sync process
--- PASS: TestFunctional/parallel/FileSync (0.19s)

TestFunctional/parallel/CertSync (1.21s)
=== RUN   TestFunctional/parallel/CertSync
=== PAUSE TestFunctional/parallel/CertSync

=== CONT  TestFunctional/parallel/CertSync
functional_test.go:1977: Checking for existence of /etc/ssl/certs/22148.pem within VM
functional_test.go:1978: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /etc/ssl/certs/22148.pem"
functional_test.go:1977: Checking for existence of /usr/share/ca-certificates/22148.pem within VM
functional_test.go:1978: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /usr/share/ca-certificates/22148.pem"
functional_test.go:1977: Checking for existence of /etc/ssl/certs/51391683.0 within VM
functional_test.go:1978: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /etc/ssl/certs/51391683.0"
functional_test.go:2004: Checking for existence of /etc/ssl/certs/221482.pem within VM
functional_test.go:2005: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /etc/ssl/certs/221482.pem"
functional_test.go:2004: Checking for existence of /usr/share/ca-certificates/221482.pem within VM
functional_test.go:2005: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /usr/share/ca-certificates/221482.pem"
functional_test.go:2004: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM
functional_test.go:2005: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0"
--- PASS: TestFunctional/parallel/CertSync (1.21s)

TestFunctional/parallel/NodeLabels (0.06s)
=== RUN   TestFunctional/parallel/NodeLabels
=== PAUSE TestFunctional/parallel/NodeLabels

=== CONT  TestFunctional/parallel/NodeLabels
functional_test.go:234: (dbg) Run:  kubectl --context functional-086932 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"
--- PASS: TestFunctional/parallel/NodeLabels (0.06s)

TestFunctional/parallel/NonActiveRuntimeDisabled (0.19s)
=== RUN   TestFunctional/parallel/NonActiveRuntimeDisabled
=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled

=== CONT  TestFunctional/parallel/NonActiveRuntimeDisabled
functional_test.go:2032: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo systemctl is-active crio"
functional_test.go:2032: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh "sudo systemctl is-active crio": exit status 1 (189.50061ms)

-- stdout --
	inactive

-- /stdout --
** stderr ** 
	ssh: Process exited with status 3

** /stderr **
--- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.19s)
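The exit status here is expected: with Docker as the active runtime, crio is stopped, so systemctl is-active prints "inactive" and returns 3, which the ssh command propagates. A one-line check, assuming the installed minikube binary:
  minikube -p functional-086932 ssh "sudo systemctl is-active crio" ; echo $?   # inactive, 3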

TestFunctional/parallel/License (0.44s)
=== RUN   TestFunctional/parallel/License
=== PAUSE TestFunctional/parallel/License

=== CONT  TestFunctional/parallel/License
functional_test.go:2293: (dbg) Run:  out/minikube-linux-amd64 license
--- PASS: TestFunctional/parallel/License (0.44s)

TestFunctional/parallel/Version/short (0.06s)
=== RUN   TestFunctional/parallel/Version/short
=== PAUSE TestFunctional/parallel/Version/short

=== CONT  TestFunctional/parallel/Version/short
functional_test.go:2261: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 version --short
--- PASS: TestFunctional/parallel/Version/short (0.06s)

TestFunctional/parallel/Version/components (0.65s)
=== RUN   TestFunctional/parallel/Version/components
=== PAUSE TestFunctional/parallel/Version/components

=== CONT  TestFunctional/parallel/Version/components
functional_test.go:2275: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 version -o=json --components
--- PASS: TestFunctional/parallel/Version/components (0.65s)

TestFunctional/parallel/DockerEnv/bash (0.85s)
=== RUN   TestFunctional/parallel/DockerEnv/bash
functional_test.go:514: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-amd64 -p functional-086932 docker-env) && out/minikube-linux-amd64 status -p functional-086932"
functional_test.go:537: (dbg) Run:  /bin/bash -c "eval $(out/minikube-linux-amd64 -p functional-086932 docker-env) && docker images"
--- PASS: TestFunctional/parallel/DockerEnv/bash (0.85s)
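The bash variant simply verifies that docker-env points the host docker CLI at the daemon inside the VM. A minimal sketch, assuming the installed minikube binary and a local docker client:
  eval "$(minikube -p functional-086932 docker-env)"
  docker images                           # lists images from the functional-086932 node's Docker daemon
  minikube -p functional-086932 status    # cluster still reports Running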

TestFunctional/parallel/UpdateContextCmd/no_changes (0.07s)
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_changes
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes

=== CONT  TestFunctional/parallel/UpdateContextCmd/no_changes
functional_test.go:2124: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.07s)

TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.11s)
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster
functional_test.go:2124: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.11s)

                                                
                                    
TestFunctional/parallel/UpdateContextCmd/no_clusters (0.07s)

                                                
                                                
=== RUN   TestFunctional/parallel/UpdateContextCmd/no_clusters
=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/UpdateContextCmd/no_clusters
functional_test.go:2124: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 update-context --alsologtostderr -v=2
--- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.07s)
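All three UpdateContextCmd subtests call the same command; a rough sketch of running it by hand follows (the follow-up kubectl check is an assumption, not something the test asserts):

# Rewrite the kubeconfig entry for the profile so it points at the VM's current address.
out/minikube-linux-amd64 -p functional-086932 update-context --alsologtostderr -v=2
# Sanity-check that the refreshed context still works.
kubectl --context functional-086932 get nodes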

                                                
                                    
TestFunctional/parallel/ServiceCmd/DeployApp (10.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/DeployApp
functional_test.go:1451: (dbg) Run:  kubectl --context functional-086932 create deployment hello-node --image kicbase/echo-server
functional_test.go:1455: (dbg) Run:  kubectl --context functional-086932 expose deployment hello-node --type=NodePort --port=8080
functional_test.go:1460: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ...
helpers_test.go:352: "hello-node-75c85bcc94-p6js2" [5c74c8e4-36ad-44f0-a87e-81dff1ba980f] Pending / Ready:ContainersNotReady (containers with unready status: [echo-server]) / ContainersReady:ContainersNotReady (containers with unready status: [echo-server])
helpers_test.go:352: "hello-node-75c85bcc94-p6js2" [5c74c8e4-36ad-44f0-a87e-81dff1ba980f] Running
functional_test.go:1460: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: app=hello-node healthy within 10.006438059s
--- PASS: TestFunctional/parallel/ServiceCmd/DeployApp (10.20s)
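The deployment used by the later ServiceCmd subtests can be recreated with plain kubectl; a sketch, with kubectl wait standing in for the test helper's pod polling:

kubectl --context functional-086932 create deployment hello-node --image kicbase/echo-server
kubectl --context functional-086932 expose deployment hello-node --type=NodePort --port=8080
# The test polls for app=hello-node pods; kubectl wait is a close equivalent.
kubectl --context functional-086932 wait --for=condition=ready pod -l app=hello-node --timeout=600s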

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListShort (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListShort
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListShort
functional_test.go:276: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls --format short --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-086932 image ls --format short --alsologtostderr:
registry.k8s.io/pause:latest
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.10.1
registry.k8s.io/pause:3.1
registry.k8s.io/kube-scheduler:v1.34.1
registry.k8s.io/kube-proxy:v1.34.1
registry.k8s.io/kube-controller-manager:v1.34.1
registry.k8s.io/kube-apiserver:v1.34.1
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28.4-glibc
docker.io/library/nginx:latest
docker.io/library/minikube-local-cache-test:functional-086932
docker.io/kicbase/echo-server:latest
docker.io/kicbase/echo-server:functional-086932
functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-086932 image ls --format short --alsologtostderr:
I1123 08:06:25.727578   28293 out.go:360] Setting OutFile to fd 1 ...
I1123 08:06:25.727836   28293 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:25.727848   28293 out.go:374] Setting ErrFile to fd 2...
I1123 08:06:25.727854   28293 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:25.728161   28293 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:06:25.729050   28293 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:25.729197   28293 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:25.732096   28293 ssh_runner.go:195] Run: systemctl --version
I1123 08:06:25.734791   28293 main.go:143] libmachine: domain functional-086932 has defined MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:25.735299   28293 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:33:43:18", ip: ""} in network mk-functional-086932: {Iface:virbr1 ExpiryTime:2025-11-23 09:02:48 +0000 UTC Type:0 Mac:52:54:00:33:43:18 Iaid: IPaddr:192.168.39.224 Prefix:24 Hostname:functional-086932 Clientid:01:52:54:00:33:43:18}
I1123 08:06:25.735337   28293 main.go:143] libmachine: domain functional-086932 has defined IP address 192.168.39.224 and MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:25.735524   28293 sshutil.go:53] new ssh client: &{IP:192.168.39.224 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/functional-086932/id_rsa Username:docker}
I1123 08:06:25.835961   28293 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.21s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListTable (0.19s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListTable
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListTable
functional_test.go:276: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls --format table --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-086932 image ls --format table --alsologtostderr:
┌─────────────────────────────────────────────┬───────────────────┬───────────────┬────────┐
│                    IMAGE                    │        TAG        │   IMAGE ID    │  SIZE  │
├─────────────────────────────────────────────┼───────────────────┼───────────────┼────────┤
│ registry.k8s.io/kube-apiserver              │ v1.34.1           │ c3994bc696102 │ 88MB   │
│ localhost/my-image                          │ functional-086932 │ dd7248992c2e3 │ 1.24MB │
│ docker.io/library/nginx                     │ latest            │ 60adc2e137e75 │ 152MB  │
│ registry.k8s.io/kube-controller-manager     │ v1.34.1           │ c80c8dbafe7dd │ 74.9MB │
│ registry.k8s.io/etcd                        │ 3.6.4-0           │ 5f1f5298c888d │ 195MB  │
│ registry.k8s.io/coredns/coredns             │ v1.12.1           │ 52546a367cc9e │ 75MB   │
│ registry.k8s.io/pause                       │ 3.3               │ 0184c1613d929 │ 683kB  │
│ registry.k8s.io/kube-scheduler              │ v1.34.1           │ 7dd6aaa1717ab │ 52.8MB │
│ registry.k8s.io/pause                       │ 3.10.1            │ cd073f4c5f6a8 │ 736kB  │
│ docker.io/kicbase/echo-server               │ functional-086932 │ 9056ab77afb8e │ 4.94MB │
│ docker.io/kicbase/echo-server               │ latest            │ 9056ab77afb8e │ 4.94MB │
│ gcr.io/k8s-minikube/storage-provisioner     │ v5                │ 6e38f40d628db │ 31.5MB │
│ registry.k8s.io/pause                       │ 3.1               │ da86e6ba6ca19 │ 742kB  │
│ registry.k8s.io/pause                       │ latest            │ 350b164e7ae1d │ 240kB  │
│ docker.io/library/minikube-local-cache-test │ functional-086932 │ 4cdeddb2fc003 │ 30B    │
│ registry.k8s.io/kube-proxy                  │ v1.34.1           │ fc25172553d79 │ 71.9MB │
│ gcr.io/k8s-minikube/busybox                 │ 1.28.4-glibc      │ 56cc512116c8f │ 4.4MB  │
└─────────────────────────────────────────────┴───────────────────┴───────────────┴────────┘
functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-086932 image ls --format table --alsologtostderr:
I1123 08:06:30.782447   28373 out.go:360] Setting OutFile to fd 1 ...
I1123 08:06:30.782697   28373 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:30.782706   28373 out.go:374] Setting ErrFile to fd 2...
I1123 08:06:30.782710   28373 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:30.782951   28373 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:06:30.783564   28373 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:30.783676   28373 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:30.785605   28373 ssh_runner.go:195] Run: systemctl --version
I1123 08:06:30.787683   28373 main.go:143] libmachine: domain functional-086932 has defined MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:30.788081   28373 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:33:43:18", ip: ""} in network mk-functional-086932: {Iface:virbr1 ExpiryTime:2025-11-23 09:02:48 +0000 UTC Type:0 Mac:52:54:00:33:43:18 Iaid: IPaddr:192.168.39.224 Prefix:24 Hostname:functional-086932 Clientid:01:52:54:00:33:43:18}
I1123 08:06:30.788109   28373 main.go:143] libmachine: domain functional-086932 has defined IP address 192.168.39.224 and MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:30.788272   28373 sshutil.go:53] new ssh client: &{IP:192.168.39.224 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/functional-086932/id_rsa Username:docker}
I1123 08:06:30.873918   28373 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.19s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListJson (0.21s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListJson
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListJson
functional_test.go:276: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls --format json --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-086932 image ls --format json --alsologtostderr:
[{"id":"52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969","repoDigests":[],"repoTags":["registry.k8s.io/coredns/coredns:v1.12.1"],"size":"75000000"},{"id":"fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7","repoDigests":[],"repoTags":["registry.k8s.io/kube-proxy:v1.34.1"],"size":"71900000"},{"id":"cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.10.1"],"size":"736000"},{"id":"9056ab77afb8e18e04303f11000a9d31b3f16b74c59475b899ae1b342d328d30","repoDigests":[],"repoTags":["docker.io/kicbase/echo-server:functional-086932","docker.io/kicbase/echo-server:latest"],"size":"4940000"},{"id":"6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/storage-provisioner:v5"],"size":"31500000"},{"id":"da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.1"],"size":"742000"},{"id":"350b164e7
ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06","repoDigests":[],"repoTags":["registry.k8s.io/pause:latest"],"size":"240000"},{"id":"4cdeddb2fc003faa0a335c73786ff55c3b3e5e675934bd28ae0c358a75bad7f9","repoDigests":[],"repoTags":["docker.io/library/minikube-local-cache-test:functional-086932"],"size":"30"},{"id":"c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97","repoDigests":[],"repoTags":["registry.k8s.io/kube-apiserver:v1.34.1"],"size":"88000000"},{"id":"5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115","repoDigests":[],"repoTags":["registry.k8s.io/etcd:3.6.4-0"],"size":"195000000"},{"id":"0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.3"],"size":"683000"},{"id":"dd7248992c2e32adbb210ca69209aa36ccd793a247b852526072534a5e2dcefc","repoDigests":[],"repoTags":["localhost/my-image:functional-086932"],"size":"1240000"},{"id":"7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813","repoDigest
s":[],"repoTags":["registry.k8s.io/kube-scheduler:v1.34.1"],"size":"52800000"},{"id":"c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f","repoDigests":[],"repoTags":["registry.k8s.io/kube-controller-manager:v1.34.1"],"size":"74900000"},{"id":"56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"4400000"},{"id":"60adc2e137e757418d4d771822fa3b3f5d3b4ad58ef2385d200c9ee78375b6d5","repoDigests":[],"repoTags":["docker.io/library/nginx:latest"],"size":"152000000"}]
functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-086932 image ls --format json --alsologtostderr:
I1123 08:06:30.582285   28362 out.go:360] Setting OutFile to fd 1 ...
I1123 08:06:30.582531   28362 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:30.582542   28362 out.go:374] Setting ErrFile to fd 2...
I1123 08:06:30.582546   28362 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:30.582754   28362 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:06:30.583308   28362 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:30.583404   28362 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:30.586066   28362 ssh_runner.go:195] Run: systemctl --version
I1123 08:06:30.588569   28362 main.go:143] libmachine: domain functional-086932 has defined MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:30.589031   28362 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:33:43:18", ip: ""} in network mk-functional-086932: {Iface:virbr1 ExpiryTime:2025-11-23 09:02:48 +0000 UTC Type:0 Mac:52:54:00:33:43:18 Iaid: IPaddr:192.168.39.224 Prefix:24 Hostname:functional-086932 Clientid:01:52:54:00:33:43:18}
I1123 08:06:30.589062   28362 main.go:143] libmachine: domain functional-086932 has defined IP address 192.168.39.224 and MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:30.589253   28362 sshutil.go:53] new ssh client: &{IP:192.168.39.224 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/functional-086932/id_rsa Username:docker}
I1123 08:06:30.690206   28362 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.21s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageListYaml (0.25s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageListYaml
=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageListYaml
functional_test.go:276: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls --format yaml --alsologtostderr
functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-086932 image ls --format yaml --alsologtostderr:
- id: c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97
repoDigests: []
repoTags:
- registry.k8s.io/kube-apiserver:v1.34.1
size: "88000000"
- id: c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f
repoDigests: []
repoTags:
- registry.k8s.io/kube-controller-manager:v1.34.1
size: "74900000"
- id: 52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969
repoDigests: []
repoTags:
- registry.k8s.io/coredns/coredns:v1.12.1
size: "75000000"
- id: fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7
repoDigests: []
repoTags:
- registry.k8s.io/kube-proxy:v1.34.1
size: "71900000"
- id: 5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115
repoDigests: []
repoTags:
- registry.k8s.io/etcd:3.6.4-0
size: "195000000"
- id: cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.10.1
size: "736000"
- id: 0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.3
size: "683000"
- id: da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e
repoDigests: []
repoTags:
- registry.k8s.io/pause:3.1
size: "742000"
- id: 4cdeddb2fc003faa0a335c73786ff55c3b3e5e675934bd28ae0c358a75bad7f9
repoDigests: []
repoTags:
- docker.io/library/minikube-local-cache-test:functional-086932
size: "30"
- id: 7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813
repoDigests: []
repoTags:
- registry.k8s.io/kube-scheduler:v1.34.1
size: "52800000"
- id: 6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/storage-provisioner:v5
size: "31500000"
- id: 60adc2e137e757418d4d771822fa3b3f5d3b4ad58ef2385d200c9ee78375b6d5
repoDigests: []
repoTags:
- docker.io/library/nginx:latest
size: "152000000"
- id: 9056ab77afb8e18e04303f11000a9d31b3f16b74c59475b899ae1b342d328d30
repoDigests: []
repoTags:
- docker.io/kicbase/echo-server:functional-086932
- docker.io/kicbase/echo-server:latest
size: "4940000"
- id: 56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c
repoDigests: []
repoTags:
- gcr.io/k8s-minikube/busybox:1.28.4-glibc
size: "4400000"
- id: 350b164e7ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06
repoDigests: []
repoTags:
- registry.k8s.io/pause:latest
size: "240000"

                                                
                                                
functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-086932 image ls --format yaml --alsologtostderr:
I1123 08:06:25.943537   28304 out.go:360] Setting OutFile to fd 1 ...
I1123 08:06:25.943826   28304 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:25.943836   28304 out.go:374] Setting ErrFile to fd 2...
I1123 08:06:25.943843   28304 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:25.944049   28304 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:06:25.944612   28304 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:25.944762   28304 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:25.947051   28304 ssh_runner.go:195] Run: systemctl --version
I1123 08:06:25.949666   28304 main.go:143] libmachine: domain functional-086932 has defined MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:25.950117   28304 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:33:43:18", ip: ""} in network mk-functional-086932: {Iface:virbr1 ExpiryTime:2025-11-23 09:02:48 +0000 UTC Type:0 Mac:52:54:00:33:43:18 Iaid: IPaddr:192.168.39.224 Prefix:24 Hostname:functional-086932 Clientid:01:52:54:00:33:43:18}
I1123 08:06:25.950150   28304 main.go:143] libmachine: domain functional-086932 has defined IP address 192.168.39.224 and MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:25.950317   28304 sshutil.go:53] new ssh client: &{IP:192.168.39.224 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/functional-086932/id_rsa Username:docker}
I1123 08:06:26.070116   28304 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}"
--- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.25s)
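The four ImageList subtests above differ only in the --format flag; assuming the same profile, they reduce to:

out/minikube-linux-amd64 -p functional-086932 image ls --format short
out/minikube-linux-amd64 -p functional-086932 image ls --format table
out/minikube-linux-amd64 -p functional-086932 image ls --format json
out/minikube-linux-amd64 -p functional-086932 image ls --format yaml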

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageBuild (4.39s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageBuild
=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/ImageCommands/ImageBuild
functional_test.go:323: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh pgrep buildkitd
functional_test.go:323: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh pgrep buildkitd: exit status 1 (169.848129ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
functional_test.go:330: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image build -t localhost/my-image:functional-086932 testdata/build --alsologtostderr
functional_test.go:330: (dbg) Done: out/minikube-linux-amd64 -p functional-086932 image build -t localhost/my-image:functional-086932 testdata/build --alsologtostderr: (4.010712071s)
functional_test.go:338: (dbg) Stderr: out/minikube-linux-amd64 -p functional-086932 image build -t localhost/my-image:functional-086932 testdata/build --alsologtostderr:
I1123 08:06:26.360839   28324 out.go:360] Setting OutFile to fd 1 ...
I1123 08:06:26.361165   28324 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:26.361175   28324 out.go:374] Setting ErrFile to fd 2...
I1123 08:06:26.361180   28324 out.go:408] TERM=,COLORTERM=, which probably does not support color
I1123 08:06:26.361374   28324 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
I1123 08:06:26.362016   28324 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:26.362648   28324 config.go:182] Loaded profile config "functional-086932": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
I1123 08:06:26.364785   28324 ssh_runner.go:195] Run: systemctl --version
I1123 08:06:26.367240   28324 main.go:143] libmachine: domain functional-086932 has defined MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:26.367687   28324 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:33:43:18", ip: ""} in network mk-functional-086932: {Iface:virbr1 ExpiryTime:2025-11-23 09:02:48 +0000 UTC Type:0 Mac:52:54:00:33:43:18 Iaid: IPaddr:192.168.39.224 Prefix:24 Hostname:functional-086932 Clientid:01:52:54:00:33:43:18}
I1123 08:06:26.367720   28324 main.go:143] libmachine: domain functional-086932 has defined IP address 192.168.39.224 and MAC address 52:54:00:33:43:18 in network mk-functional-086932
I1123 08:06:26.367926   28324 sshutil.go:53] new ssh client: &{IP:192.168.39.224 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/functional-086932/id_rsa Username:docker}
I1123 08:06:26.455658   28324 build_images.go:162] Building image from path: /tmp/build.4163865467.tar
I1123 08:06:26.455743   28324 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build
I1123 08:06:26.479021   28324 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.4163865467.tar
I1123 08:06:26.485494   28324 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.4163865467.tar: stat -c "%s %y" /var/lib/minikube/build/build.4163865467.tar: Process exited with status 1
stdout:

                                                
                                                
stderr:
stat: cannot statx '/var/lib/minikube/build/build.4163865467.tar': No such file or directory
I1123 08:06:26.485549   28324 ssh_runner.go:362] scp /tmp/build.4163865467.tar --> /var/lib/minikube/build/build.4163865467.tar (3072 bytes)
I1123 08:06:26.526408   28324 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.4163865467
I1123 08:06:26.551875   28324 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.4163865467 -xf /var/lib/minikube/build/build.4163865467.tar
I1123 08:06:26.566485   28324 docker.go:361] Building image: /var/lib/minikube/build/build.4163865467
I1123 08:06:26.566557   28324 ssh_runner.go:195] Run: docker build -t localhost/my-image:functional-086932 /var/lib/minikube/build/build.4163865467
#0 building with "default" instance using docker driver

                                                
                                                
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 97B done
#1 DONE 0.1s

                                                
                                                
#2 [internal] load metadata for gcr.io/k8s-minikube/busybox:latest
#2 DONE 1.8s

                                                
                                                
#3 [internal] load .dockerignore
#3 transferring context: 2B done
#3 DONE 0.0s

                                                
                                                
#4 [internal] load build context
#4 transferring context: 62B done
#4 DONE 0.1s

                                                
                                                
#5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b
#5 resolve gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 0.0s done
#5 sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 770B / 770B done
#5 sha256:62ffc2ed7554e4c6d360bce40bbcf196573dd27c4ce080641a2c59867e732dee 527B / 527B done
#5 sha256:beae173ccac6ad749f76713cf4440fe3d21d1043fe616dfbe30775815d1d0f6a 1.46kB / 1.46kB done
#5 sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa 0B / 772.79kB 0.1s
#5 extracting sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa
#5 sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa 772.79kB / 772.79kB 0.3s done
#5 extracting sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa 0.1s done
#5 DONE 0.6s

                                                
                                                
#6 [2/3] RUN true
#6 DONE 0.7s

                                                
                                                
#7 [3/3] ADD content.txt /
#7 DONE 0.1s

                                                
                                                
#8 exporting to image
#8 exporting layers 0.1s done
#8 writing image sha256:dd7248992c2e32adbb210ca69209aa36ccd793a247b852526072534a5e2dcefc done
#8 naming to localhost/my-image:functional-086932 done
#8 DONE 0.1s
I1123 08:06:30.249449   28324 ssh_runner.go:235] Completed: docker build -t localhost/my-image:functional-086932 /var/lib/minikube/build/build.4163865467: (3.682860845s)
I1123 08:06:30.249527   28324 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.4163865467
I1123 08:06:30.276649   28324 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.4163865467.tar
I1123 08:06:30.301454   28324 build_images.go:218] Built localhost/my-image:functional-086932 from /tmp/build.4163865467.tar
I1123 08:06:30.301500   28324 build_images.go:134] succeeded building to: functional-086932
I1123 08:06:30.301506   28324 build_images.go:135] failed building to: 
functional_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (4.39s)
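Judging from the build log (a 97-byte Dockerfile, RUN true, ADD content.txt), the context in testdata/build is roughly the following; this recreation is an approximation for illustration, not the checked-in files:

mkdir -p /tmp/image-build-demo && cd /tmp/image-build-demo
printf 'created for the image build demo\n' > content.txt
cat > Dockerfile <<'EOF'
FROM gcr.io/k8s-minikube/busybox:latest
RUN true
ADD content.txt /
EOF
# Build inside the cluster's runtime and confirm the tag appears in image ls.
out/minikube-linux-amd64 -p functional-086932 image build -t localhost/my-image:functional-086932 .
out/minikube-linux-amd64 -p functional-086932 image ls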

                                                
                                    
TestFunctional/parallel/ImageCommands/Setup (1.51s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/Setup
functional_test.go:357: (dbg) Run:  docker pull kicbase/echo-server:1.0
functional_test.go:357: (dbg) Done: docker pull kicbase/echo-server:1.0: (1.488380763s)
functional_test.go:362: (dbg) Run:  docker tag kicbase/echo-server:1.0 kicbase/echo-server:functional-086932
--- PASS: TestFunctional/parallel/ImageCommands/Setup (1.51s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadDaemon (0.89s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadDaemon
functional_test.go:370: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image load --daemon kicbase/echo-server:functional-086932 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (0.89s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.71s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageReloadDaemon
functional_test.go:380: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image load --daemon kicbase/echo-server:functional-086932 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.71s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (1.36s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon
functional_test.go:250: (dbg) Run:  docker pull kicbase/echo-server:latest
functional_test.go:255: (dbg) Run:  docker tag kicbase/echo-server:latest kicbase/echo-server:functional-086932
functional_test.go:260: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image load --daemon kicbase/echo-server:functional-086932 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (1.36s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.32s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveToFile
functional_test.go:395: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image save kicbase/echo-server:functional-086932 /home/jenkins/workspace/KVM_Linux_integration/echo-server-save.tar --alsologtostderr
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.32s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageRemove (0.35s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageRemove
functional_test.go:407: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image rm kicbase/echo-server:functional-086932 --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.35s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.54s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageLoadFromFile
functional_test.go:424: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image load /home/jenkins/workspace/KVM_Linux_integration/echo-server-save.tar --alsologtostderr
functional_test.go:466: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image ls
--- PASS: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.54s)

                                                
                                    
TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.43s)

                                                
                                                
=== RUN   TestFunctional/parallel/ImageCommands/ImageSaveDaemon
functional_test.go:434: (dbg) Run:  docker rmi kicbase/echo-server:functional-086932
functional_test.go:439: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 image save --daemon kicbase/echo-server:functional-086932 --alsologtostderr
functional_test.go:447: (dbg) Run:  docker image inspect kicbase/echo-server:functional-086932
--- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.43s)
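Taken together, the save/remove/load subtests round-trip one image through a tarball and back through the host daemon; a condensed sketch using /tmp in place of the Jenkins workspace path:

out/minikube-linux-amd64 -p functional-086932 image save kicbase/echo-server:functional-086932 /tmp/echo-server-save.tar
out/minikube-linux-amd64 -p functional-086932 image rm kicbase/echo-server:functional-086932
out/minikube-linux-amd64 -p functional-086932 image load /tmp/echo-server-save.tar
# Alternatively, export straight into the host docker daemon and inspect it there.
out/minikube-linux-amd64 -p functional-086932 image save --daemon kicbase/echo-server:functional-086932
docker image inspect kicbase/echo-server:functional-086932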

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_not_create (0.32s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_not_create
functional_test.go:1285: (dbg) Run:  out/minikube-linux-amd64 profile lis
functional_test.go:1290: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.32s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_list (0.31s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_list
functional_test.go:1325: (dbg) Run:  out/minikube-linux-amd64 profile list
functional_test.go:1330: Took "248.535783ms" to run "out/minikube-linux-amd64 profile list"
functional_test.go:1339: (dbg) Run:  out/minikube-linux-amd64 profile list -l
functional_test.go:1344: Took "60.930329ms" to run "out/minikube-linux-amd64 profile list -l"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.31s)

                                                
                                    
TestFunctional/parallel/ProfileCmd/profile_json_output (0.31s)

                                                
                                                
=== RUN   TestFunctional/parallel/ProfileCmd/profile_json_output
functional_test.go:1376: (dbg) Run:  out/minikube-linux-amd64 profile list -o json
functional_test.go:1381: Took "251.426481ms" to run "out/minikube-linux-amd64 profile list -o json"
functional_test.go:1389: (dbg) Run:  out/minikube-linux-amd64 profile list -o json --light
functional_test.go:1394: Took "61.173013ms" to run "out/minikube-linux-amd64 profile list -o json --light"
--- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.31s)
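The ProfileCmd timings above come from these four invocations; the light variants skip validating each cluster's status, which is why those runs finish in roughly 60ms instead of 250ms:

out/minikube-linux-amd64 profile list
out/minikube-linux-amd64 profile list -l
out/minikube-linux-amd64 profile list -o json
out/minikube-linux-amd64 profile list -o json --light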

                                                
                                    
TestFunctional/parallel/MountCmd/any-port (14.34s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/any-port
functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdany-port534892592/001:/mount-9p --alsologtostderr -v=1]
functional_test_mount_test.go:107: wrote "test-1763885167504058165" to /tmp/TestFunctionalparallelMountCmdany-port534892592/001/created-by-test
functional_test_mount_test.go:107: wrote "test-1763885167504058165" to /tmp/TestFunctionalparallelMountCmdany-port534892592/001/created-by-test-removed-by-pod
functional_test_mount_test.go:107: wrote "test-1763885167504058165" to /tmp/TestFunctionalparallelMountCmdany-port534892592/001/test-1763885167504058165
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (153.901342ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
I1123 08:06:07.658222   22148 retry.go:31] will retry after 734.248561ms: exit status 1
functional_test_mount_test.go:115: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:129: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh -- ls -la /mount-9p
functional_test_mount_test.go:133: guest mount directory contents
total 2
-rw-r--r-- 1 docker docker 24 Nov 23 08:06 created-by-test
-rw-r--r-- 1 docker docker 24 Nov 23 08:06 created-by-test-removed-by-pod
-rw-r--r-- 1 docker docker 24 Nov 23 08:06 test-1763885167504058165
functional_test_mount_test.go:137: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh cat /mount-9p/test-1763885167504058165
functional_test_mount_test.go:148: (dbg) Run:  kubectl --context functional-086932 replace --force -f testdata/busybox-mount-test.yaml
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ...
helpers_test.go:352: "busybox-mount" [00a9695f-f644-4ba3-930a-eb3db8e49683] Pending
helpers_test.go:352: "busybox-mount" [00a9695f-f644-4ba3-930a-eb3db8e49683] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger])
helpers_test.go:352: "busybox-mount" [00a9695f-f644-4ba3-930a-eb3db8e49683] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
helpers_test.go:352: "busybox-mount" [00a9695f-f644-4ba3-930a-eb3db8e49683] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted
functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 12.005565168s
functional_test_mount_test.go:169: (dbg) Run:  kubectl --context functional-086932 logs busybox-mount
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh stat /mount-9p/created-by-test
functional_test_mount_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh stat /mount-9p/created-by-pod
functional_test_mount_test.go:90: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdany-port534892592/001:/mount-9p --alsologtostderr -v=1] ...
--- PASS: TestFunctional/parallel/MountCmd/any-port (14.34s)
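The mount flow above boils down to a background 9p mount plus checks from inside the guest; a sketch with an illustrative host path rather than the test's temp directory:

mkdir -p /tmp/mount-demo
# Keep the mount helper running in the background while the checks run.
out/minikube-linux-amd64 mount -p functional-086932 /tmp/mount-demo:/mount-9p --alsologtostderr -v=1 &
MOUNT_PID=$!
out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p"
out/minikube-linux-amd64 -p functional-086932 ssh -- ls -la /mount-9p
kill "$MOUNT_PID"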

                                                
                                    
TestFunctional/parallel/ServiceCmd/List (0.32s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/List
functional_test.go:1469: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 service list
--- PASS: TestFunctional/parallel/ServiceCmd/List (0.32s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/JSONOutput (0.51s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/JSONOutput
functional_test.go:1499: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 service list -o json
functional_test.go:1504: Took "510.319056ms" to run "out/minikube-linux-amd64 -p functional-086932 service list -o json"
--- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (0.51s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/HTTPS (0.3s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/HTTPS
functional_test.go:1519: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 service --namespace=default --https --url hello-node
functional_test.go:1532: found endpoint: https://192.168.39.224:30198
--- PASS: TestFunctional/parallel/ServiceCmd/HTTPS (0.30s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/Format (0.27s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/Format
functional_test.go:1550: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 service hello-node --url --format={{.IP}}
--- PASS: TestFunctional/parallel/ServiceCmd/Format (0.27s)

                                                
                                    
TestFunctional/parallel/ServiceCmd/URL (0.24s)

                                                
                                                
=== RUN   TestFunctional/parallel/ServiceCmd/URL
functional_test.go:1569: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 service hello-node --url
functional_test.go:1575: found endpoint for hello-node: http://192.168.39.224:30198
--- PASS: TestFunctional/parallel/ServiceCmd/URL (0.24s)
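Resolving and hitting the NodePort endpoint by hand looks like this; the curl at the end is an extra check the test does not perform:

URL="$(out/minikube-linux-amd64 -p functional-086932 service hello-node --url)"
echo "$URL"   # resolved to http://192.168.39.224:30198 in this run
curl -s "$URL"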

                                                
                                    
TestFunctional/parallel/MountCmd/specific-port (1.66s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/specific-port
functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdspecific-port3752953052/001:/mount-9p --alsologtostderr -v=1 --port 46464]
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (190.184864ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
I1123 08:06:22.037719   22148 retry.go:31] will retry after 656.146294ms: exit status 1
functional_test_mount_test.go:243: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T /mount-9p | grep 9p"
functional_test_mount_test.go:257: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh -- ls -la /mount-9p
functional_test_mount_test.go:261: guest mount directory contents
total 0
functional_test_mount_test.go:263: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdspecific-port3752953052/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
functional_test_mount_test.go:264: reading mount text
functional_test_mount_test.go:278: done reading mount text
functional_test_mount_test.go:230: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "sudo umount -f /mount-9p"
functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh "sudo umount -f /mount-9p": exit status 1 (183.765614ms)

                                                
                                                
-- stdout --
	umount: /mount-9p: not mounted.

                                                
                                                
-- /stdout --
** stderr ** 
	ssh: Process exited with status 32

                                                
                                                
** /stderr **
functional_test_mount_test.go:232: "out/minikube-linux-amd64 -p functional-086932 ssh \"sudo umount -f /mount-9p\"": exit status 1
functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdspecific-port3752953052/001:/mount-9p --alsologtostderr -v=1 --port 46464] ...
--- PASS: TestFunctional/parallel/MountCmd/specific-port (1.66s)

                                                
                                    
TestFunctional/parallel/MountCmd/VerifyCleanup (1.2s)

                                                
                                                
=== RUN   TestFunctional/parallel/MountCmd/VerifyCleanup
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3102048381/001:/mount1 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3102048381/001:/mount2 --alsologtostderr -v=1]
functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3102048381/001:/mount3 --alsologtostderr -v=1]
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T" /mount1: exit status 1 (245.174867ms)

                                                
                                                
** stderr ** 
	ssh: Process exited with status 1

                                                
                                                
** /stderr **
I1123 08:06:23.749828   22148 retry.go:31] will retry after 376.694108ms: exit status 1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T" /mount1
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T" /mount2
functional_test_mount_test.go:325: (dbg) Run:  out/minikube-linux-amd64 -p functional-086932 ssh "findmnt -T" /mount3
functional_test_mount_test.go:370: (dbg) Run:  out/minikube-linux-amd64 mount -p functional-086932 --kill=true
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3102048381/001:/mount1 --alsologtostderr -v=1] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3102048381/001:/mount2 --alsologtostderr -v=1] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-086932 /tmp/TestFunctionalparallelMountCmdVerifyCleanup3102048381/001:/mount3 --alsologtostderr -v=1] ...
helpers_test.go:507: unable to find parent, assuming dead: process does not exist
--- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (1.20s)
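VerifyCleanup checks that a single kill call tears down every mount for the profile; sketched with an example host directory:

mkdir -p /tmp/mount-demo
out/minikube-linux-amd64 mount -p functional-086932 /tmp/mount-demo:/mount1 &
out/minikube-linux-amd64 mount -p functional-086932 /tmp/mount-demo:/mount2 &
out/minikube-linux-amd64 mount -p functional-086932 /tmp/mount-demo:/mount3 &
# --kill=true terminates all mount processes belonging to the profile at once.
out/minikube-linux-amd64 mount -p functional-086932 --kill=true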

                                                
                                    
TestFunctional/delete_echo-server_images (0.04s)

                                                
                                                
=== RUN   TestFunctional/delete_echo-server_images
functional_test.go:205: (dbg) Run:  docker rmi -f kicbase/echo-server:1.0
functional_test.go:205: (dbg) Run:  docker rmi -f kicbase/echo-server:functional-086932
--- PASS: TestFunctional/delete_echo-server_images (0.04s)

                                                
                                    
TestFunctional/delete_my-image_image (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_my-image_image
functional_test.go:213: (dbg) Run:  docker rmi -f localhost/my-image:functional-086932
--- PASS: TestFunctional/delete_my-image_image (0.02s)

                                                
                                    
TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                                
=== RUN   TestFunctional/delete_minikube_cached_images
functional_test.go:221: (dbg) Run:  docker rmi -f minikube-local-cache-test:functional-086932
--- PASS: TestFunctional/delete_minikube_cached_images (0.02s)

                                                
                                    
TestGvisorAddon (209.47s)

                                                
                                                
=== RUN   TestGvisorAddon
=== PAUSE TestGvisorAddon

                                                
                                                

                                                
                                                
=== CONT  TestGvisorAddon
gvisor_addon_test.go:52: (dbg) Run:  out/minikube-linux-amd64 start -p gvisor-350074 --memory=3072 --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --driver=kvm2 
gvisor_addon_test.go:52: (dbg) Done: out/minikube-linux-amd64 start -p gvisor-350074 --memory=3072 --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --driver=kvm2 : (1m7.792900496s)
gvisor_addon_test.go:58: (dbg) Run:  out/minikube-linux-amd64 -p gvisor-350074 cache add gcr.io/k8s-minikube/gvisor-addon:2
gvisor_addon_test.go:58: (dbg) Done: out/minikube-linux-amd64 -p gvisor-350074 cache add gcr.io/k8s-minikube/gvisor-addon:2: (5.122105418s)
gvisor_addon_test.go:63: (dbg) Run:  out/minikube-linux-amd64 -p gvisor-350074 addons enable gvisor
gvisor_addon_test.go:63: (dbg) Done: out/minikube-linux-amd64 -p gvisor-350074 addons enable gvisor: (4.919992487s)
gvisor_addon_test.go:68: (dbg) TestGvisorAddon: waiting 4m0s for pods matching "kubernetes.io/minikube-addons=gvisor" in namespace "kube-system" ...
helpers_test.go:352: "gvisor" [2259c785-f379-4085-aede-be04cb105ef9] Running
gvisor_addon_test.go:68: (dbg) TestGvisorAddon: kubernetes.io/minikube-addons=gvisor healthy within 6.004396894s
gvisor_addon_test.go:73: (dbg) Run:  kubectl --context gvisor-350074 replace --force -f testdata/nginx-gvisor.yaml
gvisor_addon_test.go:78: (dbg) TestGvisorAddon: waiting 4m0s for pods matching "run=nginx,runtime=gvisor" in namespace "default" ...
helpers_test.go:352: "nginx-gvisor" [43fe5491-c4c7-4863-bec4-99fdd0037bad] Pending
helpers_test.go:352: "nginx-gvisor" [43fe5491-c4c7-4863-bec4-99fdd0037bad] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:352: "nginx-gvisor" [43fe5491-c4c7-4863-bec4-99fdd0037bad] Running
gvisor_addon_test.go:78: (dbg) TestGvisorAddon: run=nginx,runtime=gvisor healthy within 54.005576856s
gvisor_addon_test.go:83: (dbg) Run:  out/minikube-linux-amd64 stop -p gvisor-350074
gvisor_addon_test.go:83: (dbg) Done: out/minikube-linux-amd64 stop -p gvisor-350074: (7.960109575s)
gvisor_addon_test.go:88: (dbg) Run:  out/minikube-linux-amd64 start -p gvisor-350074 --memory=3072 --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --driver=kvm2 
gvisor_addon_test.go:88: (dbg) Done: out/minikube-linux-amd64 start -p gvisor-350074 --memory=3072 --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --driver=kvm2 : (51.429396772s)
gvisor_addon_test.go:92: (dbg) TestGvisorAddon: waiting 4m0s for pods matching "kubernetes.io/minikube-addons=gvisor" in namespace "kube-system" ...
helpers_test.go:352: "gvisor" [2259c785-f379-4085-aede-be04cb105ef9] Running / Ready:ContainersNotReady (containers with unready status: [gvisor]) / ContainersReady:ContainersNotReady (containers with unready status: [gvisor])
helpers_test.go:352: "gvisor" [2259c785-f379-4085-aede-be04cb105ef9] Running
gvisor_addon_test.go:92: (dbg) TestGvisorAddon: kubernetes.io/minikube-addons=gvisor healthy within 6.005592308s
gvisor_addon_test.go:95: (dbg) TestGvisorAddon: waiting 4m0s for pods matching "run=nginx,runtime=gvisor" in namespace "default" ...
helpers_test.go:352: "nginx-gvisor" [43fe5491-c4c7-4863-bec4-99fdd0037bad] Running / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
gvisor_addon_test.go:95: (dbg) TestGvisorAddon: run=nginx,runtime=gvisor healthy within 5.004197274s
helpers_test.go:175: Cleaning up "gvisor-350074" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p gvisor-350074
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p gvisor-350074: (1.044669849s)
--- PASS: TestGvisorAddon (209.47s)
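The gVisor flow the test exercises, condensed; the RuntimeClass check at the end is an assumption about how recent minikube releases wire up the addon, not something the test asserts:

out/minikube-linux-amd64 start -p gvisor-350074 --memory=3072 --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock --driver=kvm2
out/minikube-linux-amd64 -p gvisor-350074 cache add gcr.io/k8s-minikube/gvisor-addon:2
out/minikube-linux-amd64 -p gvisor-350074 addons enable gvisor
kubectl --context gvisor-350074 replace --force -f testdata/nginx-gvisor.yaml
# The addon is expected to register a "gvisor" RuntimeClass that workloads opt into.
kubectl --context gvisor-350074 get runtimeclass gvisor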

                                                
                                    
TestMultiControlPlane/serial/StartCluster (229.33s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/StartCluster
ha_test.go:101: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=kvm2 
E1123 08:09:07.497742   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:09:35.205132   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:101: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=kvm2 : (3m48.732642478s)
ha_test.go:107: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
--- PASS: TestMultiControlPlane/serial/StartCluster (229.33s)
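StartCluster provisions the HA profile that the rest of the serial suite operates on; a sketch of the two commands it wraps (--ha brings up multiple control-plane nodes rather than a single one):

out/minikube-linux-amd64 -p ha-200207 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=kvm2
# status should list each control-plane node once the start completes.
out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5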

                                                
                                    
x
+
TestMultiControlPlane/serial/DeployApp (7.05s)

                                                
                                                
=== RUN   TestMultiControlPlane/serial/DeployApp
ha_test.go:128: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml
ha_test.go:133: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- rollout status deployment/busybox
ha_test.go:133: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 kubectl -- rollout status deployment/busybox: (4.33339663s)
ha_test.go:140: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}'
ha_test.go:163: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:171: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-cgzz7 -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-kpjqj -- nslookup kubernetes.io
ha_test.go:171: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-sdw7s -- nslookup kubernetes.io
ha_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-cgzz7 -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-kpjqj -- nslookup kubernetes.default
ha_test.go:181: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-sdw7s -- nslookup kubernetes.default
ha_test.go:189: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-cgzz7 -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-kpjqj -- nslookup kubernetes.default.svc.cluster.local
ha_test.go:189: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-sdw7s -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiControlPlane/serial/DeployApp (7.05s)

TestMultiControlPlane/serial/PingHostFromPods (1.51s)
=== RUN   TestMultiControlPlane/serial/PingHostFromPods
ha_test.go:199: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}'
ha_test.go:207: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-cgzz7 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-cgzz7 -- sh -c "ping -c 1 192.168.39.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-kpjqj -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-kpjqj -- sh -c "ping -c 1 192.168.39.1"
ha_test.go:207: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-sdw7s -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
ha_test.go:218: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 kubectl -- exec busybox-7b57f96db7-sdw7s -- sh -c "ping -c 1 192.168.39.1"
--- PASS: TestMultiControlPlane/serial/PingHostFromPods (1.51s)

TestMultiControlPlane/serial/AddWorkerNode (51.02s)
=== RUN   TestMultiControlPlane/serial/AddWorkerNode
ha_test.go:228: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node add --alsologtostderr -v 5
E1123 08:10:59.015878   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.022364   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.033727   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.055223   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.096731   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.178273   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.339923   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:10:59.661617   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:11:00.303626   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:11:01.585655   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:11:04.147417   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:11:09.269601   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:11:19.511934   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:11:39.993421   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:228: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 node add --alsologtostderr -v 5: (50.314352353s)
ha_test.go:234: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
--- PASS: TestMultiControlPlane/serial/AddWorkerNode (51.02s)

TestMultiControlPlane/serial/NodeLabels (0.07s)
=== RUN   TestMultiControlPlane/serial/NodeLabels
ha_test.go:255: (dbg) Run:  kubectl --context ha-200207 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiControlPlane/serial/NodeLabels (0.07s)

TestMultiControlPlane/serial/HAppyAfterClusterStart (0.69s)
=== RUN   TestMultiControlPlane/serial/HAppyAfterClusterStart
ha_test.go:281: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterClusterStart (0.69s)

TestMultiControlPlane/serial/CopyFile (10.84s)
=== RUN   TestMultiControlPlane/serial/CopyFile
ha_test.go:328: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --output json --alsologtostderr -v 5
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp testdata/cp-test.txt ha-200207:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2274466294/001/cp-test_ha-200207.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207:/home/docker/cp-test.txt ha-200207-m02:/home/docker/cp-test_ha-200207_ha-200207-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test_ha-200207_ha-200207-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207:/home/docker/cp-test.txt ha-200207-m03:/home/docker/cp-test_ha-200207_ha-200207-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test_ha-200207_ha-200207-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207:/home/docker/cp-test.txt ha-200207-m04:/home/docker/cp-test_ha-200207_ha-200207-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test_ha-200207_ha-200207-m04.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp testdata/cp-test.txt ha-200207-m02:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m02:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2274466294/001/cp-test_ha-200207-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m02:/home/docker/cp-test.txt ha-200207:/home/docker/cp-test_ha-200207-m02_ha-200207.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test_ha-200207-m02_ha-200207.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m02:/home/docker/cp-test.txt ha-200207-m03:/home/docker/cp-test_ha-200207-m02_ha-200207-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test_ha-200207-m02_ha-200207-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m02:/home/docker/cp-test.txt ha-200207-m04:/home/docker/cp-test_ha-200207-m02_ha-200207-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test_ha-200207-m02_ha-200207-m04.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp testdata/cp-test.txt ha-200207-m03:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m03:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2274466294/001/cp-test_ha-200207-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m03:/home/docker/cp-test.txt ha-200207:/home/docker/cp-test_ha-200207-m03_ha-200207.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test_ha-200207-m03_ha-200207.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m03:/home/docker/cp-test.txt ha-200207-m02:/home/docker/cp-test_ha-200207-m03_ha-200207-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test_ha-200207-m03_ha-200207-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m03:/home/docker/cp-test.txt ha-200207-m04:/home/docker/cp-test_ha-200207-m03_ha-200207-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test_ha-200207-m03_ha-200207-m04.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp testdata/cp-test.txt ha-200207-m04:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m04:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile2274466294/001/cp-test_ha-200207-m04.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m04:/home/docker/cp-test.txt ha-200207:/home/docker/cp-test_ha-200207-m04_ha-200207.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207 "sudo cat /home/docker/cp-test_ha-200207-m04_ha-200207.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m04:/home/docker/cp-test.txt ha-200207-m02:/home/docker/cp-test_ha-200207-m04_ha-200207-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m02 "sudo cat /home/docker/cp-test_ha-200207-m04_ha-200207-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 cp ha-200207-m04:/home/docker/cp-test.txt ha-200207-m03:/home/docker/cp-test_ha-200207-m04_ha-200207-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m04 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 ssh -n ha-200207-m03 "sudo cat /home/docker/cp-test_ha-200207-m04_ha-200207-m03.txt"
--- PASS: TestMultiControlPlane/serial/CopyFile (10.84s)

TestMultiControlPlane/serial/StopSecondaryNode (14.15s)
=== RUN   TestMultiControlPlane/serial/StopSecondaryNode
ha_test.go:365: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node stop m02 --alsologtostderr -v 5
ha_test.go:365: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 node stop m02 --alsologtostderr -v 5: (13.642436345s)
ha_test.go:371: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
ha_test.go:371: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5: exit status 7 (509.866004ms)

-- stdout --
	ha-200207
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-200207-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-200207-m03
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	ha-200207-m04
	type: Worker
	host: Running
	kubelet: Running
	

-- /stdout --
** stderr ** 
	I1123 08:12:09.647587   31320 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:12:09.647867   31320 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:12:09.647877   31320 out.go:374] Setting ErrFile to fd 2...
	I1123 08:12:09.647882   31320 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:12:09.648064   31320 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:12:09.648230   31320 out.go:368] Setting JSON to false
	I1123 08:12:09.648272   31320 mustload.go:66] Loading cluster: ha-200207
	I1123 08:12:09.648324   31320 notify.go:221] Checking for updates...
	I1123 08:12:09.648792   31320 config.go:182] Loaded profile config "ha-200207": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:12:09.648814   31320 status.go:174] checking status of ha-200207 ...
	I1123 08:12:09.650901   31320 status.go:371] ha-200207 host status = "Running" (err=<nil>)
	I1123 08:12:09.650923   31320 host.go:66] Checking if "ha-200207" exists ...
	I1123 08:12:09.653857   31320 main.go:143] libmachine: domain ha-200207 has defined MAC address 52:54:00:46:94:e4 in network mk-ha-200207
	I1123 08:12:09.654423   31320 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:46:94:e4", ip: ""} in network mk-ha-200207: {Iface:virbr1 ExpiryTime:2025-11-23 09:07:11 +0000 UTC Type:0 Mac:52:54:00:46:94:e4 Iaid: IPaddr:192.168.39.86 Prefix:24 Hostname:ha-200207 Clientid:01:52:54:00:46:94:e4}
	I1123 08:12:09.654448   31320 main.go:143] libmachine: domain ha-200207 has defined IP address 192.168.39.86 and MAC address 52:54:00:46:94:e4 in network mk-ha-200207
	I1123 08:12:09.654624   31320 host.go:66] Checking if "ha-200207" exists ...
	I1123 08:12:09.654868   31320 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1123 08:12:09.657919   31320 main.go:143] libmachine: domain ha-200207 has defined MAC address 52:54:00:46:94:e4 in network mk-ha-200207
	I1123 08:12:09.658482   31320 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:46:94:e4", ip: ""} in network mk-ha-200207: {Iface:virbr1 ExpiryTime:2025-11-23 09:07:11 +0000 UTC Type:0 Mac:52:54:00:46:94:e4 Iaid: IPaddr:192.168.39.86 Prefix:24 Hostname:ha-200207 Clientid:01:52:54:00:46:94:e4}
	I1123 08:12:09.658505   31320 main.go:143] libmachine: domain ha-200207 has defined IP address 192.168.39.86 and MAC address 52:54:00:46:94:e4 in network mk-ha-200207
	I1123 08:12:09.658667   31320 sshutil.go:53] new ssh client: &{IP:192.168.39.86 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/ha-200207/id_rsa Username:docker}
	I1123 08:12:09.747452   31320 ssh_runner.go:195] Run: systemctl --version
	I1123 08:12:09.761925   31320 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:12:09.780579   31320 kubeconfig.go:125] found "ha-200207" server: "https://192.168.39.254:8443"
	I1123 08:12:09.780616   31320 api_server.go:166] Checking apiserver status ...
	I1123 08:12:09.780663   31320 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:12:09.802484   31320 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2473/cgroup
	W1123 08:12:09.814649   31320 api_server.go:177] unable to find freezer cgroup: sudo egrep ^[0-9]+:freezer: /proc/2473/cgroup: Process exited with status 1
	stdout:
	
	stderr:
	I1123 08:12:09.814702   31320 ssh_runner.go:195] Run: ls
	I1123 08:12:09.820065   31320 api_server.go:253] Checking apiserver healthz at https://192.168.39.254:8443/healthz ...
	I1123 08:12:09.824808   31320 api_server.go:279] https://192.168.39.254:8443/healthz returned 200:
	ok
	I1123 08:12:09.824833   31320 status.go:463] ha-200207 apiserver status = Running (err=<nil>)
	I1123 08:12:09.824844   31320 status.go:176] ha-200207 status: &{Name:ha-200207 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:12:09.824877   31320 status.go:174] checking status of ha-200207-m02 ...
	I1123 08:12:09.826680   31320 status.go:371] ha-200207-m02 host status = "Stopped" (err=<nil>)
	I1123 08:12:09.826696   31320 status.go:384] host is not running, skipping remaining checks
	I1123 08:12:09.826702   31320 status.go:176] ha-200207-m02 status: &{Name:ha-200207-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:12:09.826715   31320 status.go:174] checking status of ha-200207-m03 ...
	I1123 08:12:09.828004   31320 status.go:371] ha-200207-m03 host status = "Running" (err=<nil>)
	I1123 08:12:09.828021   31320 host.go:66] Checking if "ha-200207-m03" exists ...
	I1123 08:12:09.830449   31320 main.go:143] libmachine: domain ha-200207-m03 has defined MAC address 52:54:00:8e:49:d0 in network mk-ha-200207
	I1123 08:12:09.831014   31320 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:8e:49:d0", ip: ""} in network mk-ha-200207: {Iface:virbr1 ExpiryTime:2025-11-23 09:09:30 +0000 UTC Type:0 Mac:52:54:00:8e:49:d0 Iaid: IPaddr:192.168.39.189 Prefix:24 Hostname:ha-200207-m03 Clientid:01:52:54:00:8e:49:d0}
	I1123 08:12:09.831040   31320 main.go:143] libmachine: domain ha-200207-m03 has defined IP address 192.168.39.189 and MAC address 52:54:00:8e:49:d0 in network mk-ha-200207
	I1123 08:12:09.831225   31320 host.go:66] Checking if "ha-200207-m03" exists ...
	I1123 08:12:09.831469   31320 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1123 08:12:09.833851   31320 main.go:143] libmachine: domain ha-200207-m03 has defined MAC address 52:54:00:8e:49:d0 in network mk-ha-200207
	I1123 08:12:09.834205   31320 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:8e:49:d0", ip: ""} in network mk-ha-200207: {Iface:virbr1 ExpiryTime:2025-11-23 09:09:30 +0000 UTC Type:0 Mac:52:54:00:8e:49:d0 Iaid: IPaddr:192.168.39.189 Prefix:24 Hostname:ha-200207-m03 Clientid:01:52:54:00:8e:49:d0}
	I1123 08:12:09.834253   31320 main.go:143] libmachine: domain ha-200207-m03 has defined IP address 192.168.39.189 and MAC address 52:54:00:8e:49:d0 in network mk-ha-200207
	I1123 08:12:09.834383   31320 sshutil.go:53] new ssh client: &{IP:192.168.39.189 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/ha-200207-m03/id_rsa Username:docker}
	I1123 08:12:09.919400   31320 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:12:09.940525   31320 kubeconfig.go:125] found "ha-200207" server: "https://192.168.39.254:8443"
	I1123 08:12:09.940560   31320 api_server.go:166] Checking apiserver status ...
	I1123 08:12:09.940609   31320 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:12:09.962375   31320 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2366/cgroup
	W1123 08:12:09.975780   31320 api_server.go:177] unable to find freezer cgroup: sudo egrep ^[0-9]+:freezer: /proc/2366/cgroup: Process exited with status 1
	stdout:
	
	stderr:
	I1123 08:12:09.975854   31320 ssh_runner.go:195] Run: ls
	I1123 08:12:09.981411   31320 api_server.go:253] Checking apiserver healthz at https://192.168.39.254:8443/healthz ...
	I1123 08:12:09.988164   31320 api_server.go:279] https://192.168.39.254:8443/healthz returned 200:
	ok
	I1123 08:12:09.988186   31320 status.go:463] ha-200207-m03 apiserver status = Running (err=<nil>)
	I1123 08:12:09.988194   31320 status.go:176] ha-200207-m03 status: &{Name:ha-200207-m03 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:12:09.988217   31320 status.go:174] checking status of ha-200207-m04 ...
	I1123 08:12:09.989962   31320 status.go:371] ha-200207-m04 host status = "Running" (err=<nil>)
	I1123 08:12:09.989978   31320 host.go:66] Checking if "ha-200207-m04" exists ...
	I1123 08:12:09.992962   31320 main.go:143] libmachine: domain ha-200207-m04 has defined MAC address 52:54:00:4c:6d:0c in network mk-ha-200207
	I1123 08:12:09.993412   31320 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4c:6d:0c", ip: ""} in network mk-ha-200207: {Iface:virbr1 ExpiryTime:2025-11-23 09:11:10 +0000 UTC Type:0 Mac:52:54:00:4c:6d:0c Iaid: IPaddr:192.168.39.191 Prefix:24 Hostname:ha-200207-m04 Clientid:01:52:54:00:4c:6d:0c}
	I1123 08:12:09.993432   31320 main.go:143] libmachine: domain ha-200207-m04 has defined IP address 192.168.39.191 and MAC address 52:54:00:4c:6d:0c in network mk-ha-200207
	I1123 08:12:09.993563   31320 host.go:66] Checking if "ha-200207-m04" exists ...
	I1123 08:12:09.993758   31320 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1123 08:12:09.995966   31320 main.go:143] libmachine: domain ha-200207-m04 has defined MAC address 52:54:00:4c:6d:0c in network mk-ha-200207
	I1123 08:12:09.996401   31320 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:4c:6d:0c", ip: ""} in network mk-ha-200207: {Iface:virbr1 ExpiryTime:2025-11-23 09:11:10 +0000 UTC Type:0 Mac:52:54:00:4c:6d:0c Iaid: IPaddr:192.168.39.191 Prefix:24 Hostname:ha-200207-m04 Clientid:01:52:54:00:4c:6d:0c}
	I1123 08:12:09.996424   31320 main.go:143] libmachine: domain ha-200207-m04 has defined IP address 192.168.39.191 and MAC address 52:54:00:4c:6d:0c in network mk-ha-200207
	I1123 08:12:09.996559   31320 sshutil.go:53] new ssh client: &{IP:192.168.39.191 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/ha-200207-m04/id_rsa Username:docker}
	I1123 08:12:10.078901   31320 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:12:10.097015   31320 status.go:176] ha-200207-m04 status: &{Name:ha-200207-m04 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}

** /stderr **
--- PASS: TestMultiControlPlane/serial/StopSecondaryNode (14.15s)

TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.52s)
=== RUN   TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop
ha_test.go:392: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.52s)

TestMultiControlPlane/serial/RestartSecondaryNode (35.57s)
=== RUN   TestMultiControlPlane/serial/RestartSecondaryNode
ha_test.go:422: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node start m02 --alsologtostderr -v 5
E1123 08:12:20.955455   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:422: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 node start m02 --alsologtostderr -v 5: (34.518753167s)
ha_test.go:430: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
ha_test.go:450: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiControlPlane/serial/RestartSecondaryNode (35.57s)

TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (0.94s)
=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart
ha_test.go:281: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (0.94s)

TestMultiControlPlane/serial/RestartClusterKeepsNodes (176.34s)
=== RUN   TestMultiControlPlane/serial/RestartClusterKeepsNodes
ha_test.go:458: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node list --alsologtostderr -v 5
ha_test.go:464: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 stop --alsologtostderr -v 5
ha_test.go:464: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 stop --alsologtostderr -v 5: (41.237924485s)
ha_test.go:469: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 start --wait true --alsologtostderr -v 5
E1123 08:13:42.877338   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:14:07.498451   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:469: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 start --wait true --alsologtostderr -v 5: (2m14.944125601s)
ha_test.go:474: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node list --alsologtostderr -v 5
--- PASS: TestMultiControlPlane/serial/RestartClusterKeepsNodes (176.34s)

TestMultiControlPlane/serial/DeleteSecondaryNode (7.26s)
=== RUN   TestMultiControlPlane/serial/DeleteSecondaryNode
ha_test.go:489: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node delete m03 --alsologtostderr -v 5
ha_test.go:489: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 node delete m03 --alsologtostderr -v 5: (6.574022067s)
ha_test.go:495: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
ha_test.go:513: (dbg) Run:  kubectl get nodes
ha_test.go:521: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/DeleteSecondaryNode (7.26s)

TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.51s)
=== RUN   TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete
ha_test.go:392: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.51s)

TestMultiControlPlane/serial/StopCluster (40.46s)
=== RUN   TestMultiControlPlane/serial/StopCluster
ha_test.go:533: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 stop --alsologtostderr -v 5
E1123 08:15:59.016140   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:16:26.719508   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:533: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 stop --alsologtostderr -v 5: (40.398949149s)
ha_test.go:539: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
ha_test.go:539: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5: exit status 7 (64.667254ms)

-- stdout --
	ha-200207
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-200207-m02
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	ha-200207-m04
	type: Worker
	host: Stopped
	kubelet: Stopped
	

-- /stdout --
** stderr ** 
	I1123 08:16:31.685749   32982 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:16:31.686039   32982 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:16:31.686050   32982 out.go:374] Setting ErrFile to fd 2...
	I1123 08:16:31.686056   32982 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:16:31.686281   32982 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:16:31.686470   32982 out.go:368] Setting JSON to false
	I1123 08:16:31.686503   32982 mustload.go:66] Loading cluster: ha-200207
	I1123 08:16:31.686610   32982 notify.go:221] Checking for updates...
	I1123 08:16:31.686939   32982 config.go:182] Loaded profile config "ha-200207": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:16:31.686960   32982 status.go:174] checking status of ha-200207 ...
	I1123 08:16:31.688895   32982 status.go:371] ha-200207 host status = "Stopped" (err=<nil>)
	I1123 08:16:31.688912   32982 status.go:384] host is not running, skipping remaining checks
	I1123 08:16:31.688918   32982 status.go:176] ha-200207 status: &{Name:ha-200207 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:16:31.688936   32982 status.go:174] checking status of ha-200207-m02 ...
	I1123 08:16:31.690214   32982 status.go:371] ha-200207-m02 host status = "Stopped" (err=<nil>)
	I1123 08:16:31.690229   32982 status.go:384] host is not running, skipping remaining checks
	I1123 08:16:31.690245   32982 status.go:176] ha-200207-m02 status: &{Name:ha-200207-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:16:31.690267   32982 status.go:174] checking status of ha-200207-m04 ...
	I1123 08:16:31.691477   32982 status.go:371] ha-200207-m04 host status = "Stopped" (err=<nil>)
	I1123 08:16:31.691491   32982 status.go:384] host is not running, skipping remaining checks
	I1123 08:16:31.691497   32982 status.go:176] ha-200207-m04 status: &{Name:ha-200207-m04 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

** /stderr **
--- PASS: TestMultiControlPlane/serial/StopCluster (40.46s)

TestMultiControlPlane/serial/RestartCluster (127.98s)
=== RUN   TestMultiControlPlane/serial/RestartCluster
ha_test.go:562: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 start --wait true --alsologtostderr -v 5 --driver=kvm2 
ha_test.go:562: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 start --wait true --alsologtostderr -v 5 --driver=kvm2 : (2m7.331423192s)
ha_test.go:568: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
ha_test.go:586: (dbg) Run:  kubectl get nodes
ha_test.go:594: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiControlPlane/serial/RestartCluster (127.98s)

TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.53s)
=== RUN   TestMultiControlPlane/serial/DegradedAfterClusterRestart
ha_test.go:392: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.53s)

TestMultiControlPlane/serial/AddSecondaryNode (92.34s)
=== RUN   TestMultiControlPlane/serial/AddSecondaryNode
ha_test.go:607: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 node add --control-plane --alsologtostderr -v 5
E1123 08:19:07.497609   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:607: (dbg) Done: out/minikube-linux-amd64 -p ha-200207 node add --control-plane --alsologtostderr -v 5: (1m31.60456285s)
ha_test.go:613: (dbg) Run:  out/minikube-linux-amd64 -p ha-200207 status --alsologtostderr -v 5
--- PASS: TestMultiControlPlane/serial/AddSecondaryNode (92.34s)

TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.71s)
=== RUN   TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd
ha_test.go:281: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.71s)

TestImageBuild/serial/Setup (44.41s)
=== RUN   TestImageBuild/serial/Setup
image_test.go:69: (dbg) Run:  out/minikube-linux-amd64 start -p image-709187 --driver=kvm2 
E1123 08:20:30.567402   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:20:59.016617   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
image_test.go:69: (dbg) Done: out/minikube-linux-amd64 start -p image-709187 --driver=kvm2 : (44.409605237s)
--- PASS: TestImageBuild/serial/Setup (44.41s)

TestImageBuild/serial/NormalBuild (1.53s)
=== RUN   TestImageBuild/serial/NormalBuild
image_test.go:78: (dbg) Run:  out/minikube-linux-amd64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-709187
image_test.go:78: (dbg) Done: out/minikube-linux-amd64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-709187: (1.530710306s)
--- PASS: TestImageBuild/serial/NormalBuild (1.53s)

TestImageBuild/serial/BuildWithBuildArg (1.06s)
=== RUN   TestImageBuild/serial/BuildWithBuildArg
image_test.go:99: (dbg) Run:  out/minikube-linux-amd64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-709187
image_test.go:99: (dbg) Done: out/minikube-linux-amd64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-709187: (1.064208554s)
--- PASS: TestImageBuild/serial/BuildWithBuildArg (1.06s)

TestImageBuild/serial/BuildWithDockerIgnore (0.71s)
=== RUN   TestImageBuild/serial/BuildWithDockerIgnore
image_test.go:133: (dbg) Run:  out/minikube-linux-amd64 image build -t aaa:latest ./testdata/image-build/test-normal --build-opt=no-cache -p image-709187
--- PASS: TestImageBuild/serial/BuildWithDockerIgnore (0.71s)

TestImageBuild/serial/BuildWithSpecifiedDockerfile (1.14s)
=== RUN   TestImageBuild/serial/BuildWithSpecifiedDockerfile
image_test.go:88: (dbg) Run:  out/minikube-linux-amd64 image build -t aaa:latest -f inner/Dockerfile ./testdata/image-build/test-f -p image-709187
image_test.go:88: (dbg) Done: out/minikube-linux-amd64 image build -t aaa:latest -f inner/Dockerfile ./testdata/image-build/test-f -p image-709187: (1.141301593s)
--- PASS: TestImageBuild/serial/BuildWithSpecifiedDockerfile (1.14s)

TestJSONOutput/start/Command (89.21s)
=== RUN   TestJSONOutput/start/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 start -p json-output-895251 --output=json --user=testUser --memory=3072 --wait=true --driver=kvm2 
json_output_test.go:63: (dbg) Done: out/minikube-linux-amd64 start -p json-output-895251 --output=json --user=testUser --memory=3072 --wait=true --driver=kvm2 : (1m29.208130877s)
--- PASS: TestJSONOutput/start/Command (89.21s)

TestJSONOutput/start/Audit (0s)
=== RUN   TestJSONOutput/start/Audit
--- PASS: TestJSONOutput/start/Audit (0.00s)

TestJSONOutput/start/parallel/DistinctCurrentSteps (0s)
=== RUN   TestJSONOutput/start/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps
=== CONT  TestJSONOutput/start/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/start/parallel/IncreasingCurrentSteps (0s)
=== RUN   TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps
=== CONT  TestJSONOutput/start/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s)

TestJSONOutput/pause/Command (0.59s)
=== RUN   TestJSONOutput/pause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 pause -p json-output-895251 --output=json --user=testUser
--- PASS: TestJSONOutput/pause/Command (0.59s)

TestJSONOutput/pause/Audit (0s)
=== RUN   TestJSONOutput/pause/Audit
--- PASS: TestJSONOutput/pause/Audit (0.00s)

TestJSONOutput/pause/parallel/DistinctCurrentSteps (0s)
=== RUN   TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps
=== CONT  TestJSONOutput/pause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0s)
=== RUN   TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps
=== CONT  TestJSONOutput/pause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s)

TestJSONOutput/unpause/Command (0.59s)
=== RUN   TestJSONOutput/unpause/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 unpause -p json-output-895251 --output=json --user=testUser
--- PASS: TestJSONOutput/unpause/Command (0.59s)

TestJSONOutput/unpause/Audit (0s)
=== RUN   TestJSONOutput/unpause/Audit
--- PASS: TestJSONOutput/unpause/Audit (0.00s)

TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0s)
=== RUN   TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps
=== CONT  TestJSONOutput/unpause/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0s)
=== RUN   TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
=== CONT  TestJSONOutput/unpause/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s)

TestJSONOutput/stop/Command (14.02s)
=== RUN   TestJSONOutput/stop/Command
json_output_test.go:63: (dbg) Run:  out/minikube-linux-amd64 stop -p json-output-895251 --output=json --user=testUser
json_output_test.go:63: (dbg) Done: out/minikube-linux-amd64 stop -p json-output-895251 --output=json --user=testUser: (14.019988291s)
--- PASS: TestJSONOutput/stop/Command (14.02s)

TestJSONOutput/stop/Audit (0s)
=== RUN   TestJSONOutput/stop/Audit
--- PASS: TestJSONOutput/stop/Audit (0.00s)

TestJSONOutput/stop/parallel/DistinctCurrentSteps (0s)
=== RUN   TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps
=== CONT  TestJSONOutput/stop/parallel/DistinctCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s)

TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0s)
=== RUN   TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== PAUSE TestJSONOutput/stop/parallel/IncreasingCurrentSteps
=== CONT  TestJSONOutput/stop/parallel/IncreasingCurrentSteps
--- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s)

TestErrorJSONOutput (0.23s)
=== RUN   TestErrorJSONOutput
json_output_test.go:160: (dbg) Run:  out/minikube-linux-amd64 start -p json-output-error-132033 --memory=3072 --output=json --wait=true --driver=fail
json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p json-output-error-132033 --memory=3072 --output=json --wait=true --driver=fail: exit status 56 (77.386866ms)

-- stdout --
	{"specversion":"1.0","id":"4f05e9ce-a36a-49e2-876e-6d5e966a2347","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-132033] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)","name":"Initial Minikube Setup","totalsteps":"19"}}
	{"specversion":"1.0","id":"0e5f8503-0a64-4728-83c7-cf90d07b81d5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_LOCATION=21966"}}
	{"specversion":"1.0","id":"6e6ac684-7112-4a2a-aee5-704fe94390ce","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}}
	{"specversion":"1.0","id":"ef390cd2-a5d2-46aa-85b7-d6f300ffb222","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig"}}
	{"specversion":"1.0","id":"aee41b96-07c7-4e1d-b8f0-2a0cce5fc706","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube"}}
	{"specversion":"1.0","id":"67e05664-1c20-4aeb-8787-6b26521136e7","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-amd64"}}
	{"specversion":"1.0","id":"dfd19c1c-e493-4bd4-b9e1-df53ed29b11c","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}}
	{"specversion":"1.0","id":"98391d15-9cfb-49f1-bcba-3b83c7cfe7e5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/amd64","name":"DRV_UNSUPPORTED_OS","url":""}}

-- /stdout --
helpers_test.go:175: Cleaning up "json-output-error-132033" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p json-output-error-132033
--- PASS: TestErrorJSONOutput (0.23s)

TestMainNoArgs (0.06s)
=== RUN   TestMainNoArgs
main_test.go:70: (dbg) Run:  out/minikube-linux-amd64
--- PASS: TestMainNoArgs (0.06s)

TestMinikubeProfile (90.31s)
=== RUN   TestMinikubeProfile
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-amd64 start -p first-473656 --driver=kvm2 
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p first-473656 --driver=kvm2 : (41.479005983s)
minikube_profile_test.go:44: (dbg) Run:  out/minikube-linux-amd64 start -p second-475805 --driver=kvm2 
E1123 08:24:07.505068   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p second-475805 --driver=kvm2 : (46.204047265s)
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-amd64 profile first-473656
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-amd64 profile list -ojson
minikube_profile_test.go:51: (dbg) Run:  out/minikube-linux-amd64 profile second-475805
minikube_profile_test.go:55: (dbg) Run:  out/minikube-linux-amd64 profile list -ojson
helpers_test.go:175: Cleaning up "second-475805" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p second-475805
helpers_test.go:175: Cleaning up "first-473656" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p first-473656
--- PASS: TestMinikubeProfile (90.31s)
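
The profile assertions above parse the output of out/minikube-linux-amd64 profile list -ojson. A sketch of inspecting that output by hand is shown below; the .valid[].Name path is an assumption about the JSON layout, not something verified by this run:

	# print the names of valid profiles (field names assumed, not confirmed by this log)
	out/minikube-linux-amd64 profile list -ojson | jq -r '.valid[].Name'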

                                                
                                    
TestMountStart/serial/StartWithMountFirst (21.63s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountFirst
mount_start_test.go:118: (dbg) Run:  out/minikube-linux-amd64 start -p mount-start-1-465134 --memory=3072 --mount-string /tmp/TestMountStartserial2077645012/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=kvm2 
mount_start_test.go:118: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-1-465134 --memory=3072 --mount-string /tmp/TestMountStartserial2077645012/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=kvm2 : (20.628992749s)
--- PASS: TestMountStart/serial/StartWithMountFirst (21.63s)

                                                
                                    
TestMountStart/serial/VerifyMountFirst (0.31s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountFirst
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-1-465134 ssh -- ls /minikube-host
mount_start_test.go:147: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-1-465134 ssh -- findmnt --json /minikube-host
--- PASS: TestMountStart/serial/VerifyMountFirst (0.31s)

                                                
                                    
TestMountStart/serial/StartWithMountSecond (23.78s)

                                                
                                                
=== RUN   TestMountStart/serial/StartWithMountSecond
mount_start_test.go:118: (dbg) Run:  out/minikube-linux-amd64 start -p mount-start-2-477912 --memory=3072 --mount-string /tmp/TestMountStartserial2077645012/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=kvm2 
mount_start_test.go:118: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-2-477912 --memory=3072 --mount-string /tmp/TestMountStartserial2077645012/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=kvm2 : (22.779505721s)
--- PASS: TestMountStart/serial/StartWithMountSecond (23.78s)

                                                
                                    
TestMountStart/serial/VerifyMountSecond (0.3s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountSecond
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-477912 ssh -- ls /minikube-host
mount_start_test.go:147: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-477912 ssh -- findmnt --json /minikube-host
--- PASS: TestMountStart/serial/VerifyMountSecond (0.30s)

                                                
                                    
TestMountStart/serial/DeleteFirst (0.69s)

                                                
                                                
=== RUN   TestMountStart/serial/DeleteFirst
pause_test.go:132: (dbg) Run:  out/minikube-linux-amd64 delete -p mount-start-1-465134 --alsologtostderr -v=5
--- PASS: TestMountStart/serial/DeleteFirst (0.69s)

                                                
                                    
TestMountStart/serial/VerifyMountPostDelete (0.3s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostDelete
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-477912 ssh -- ls /minikube-host
mount_start_test.go:147: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-477912 ssh -- findmnt --json /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostDelete (0.30s)

                                                
                                    
TestMountStart/serial/Stop (1.33s)

                                                
                                                
=== RUN   TestMountStart/serial/Stop
mount_start_test.go:196: (dbg) Run:  out/minikube-linux-amd64 stop -p mount-start-2-477912
mount_start_test.go:196: (dbg) Done: out/minikube-linux-amd64 stop -p mount-start-2-477912: (1.326935413s)
--- PASS: TestMountStart/serial/Stop (1.33s)

                                                
                                    
TestMountStart/serial/RestartStopped (19.58s)

                                                
                                                
=== RUN   TestMountStart/serial/RestartStopped
mount_start_test.go:207: (dbg) Run:  out/minikube-linux-amd64 start -p mount-start-2-477912
mount_start_test.go:207: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-2-477912: (18.582328698s)
--- PASS: TestMountStart/serial/RestartStopped (19.58s)

                                                
                                    
TestMountStart/serial/VerifyMountPostStop (0.3s)

                                                
                                                
=== RUN   TestMountStart/serial/VerifyMountPostStop
mount_start_test.go:134: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-477912 ssh -- ls /minikube-host
mount_start_test.go:147: (dbg) Run:  out/minikube-linux-amd64 -p mount-start-2-477912 ssh -- findmnt --json /minikube-host
--- PASS: TestMountStart/serial/VerifyMountPostStop (0.30s)

                                                
                                    
TestMultiNode/serial/FreshStart2Nodes (121.3s)

                                                
                                                
=== RUN   TestMultiNode/serial/FreshStart2Nodes
multinode_test.go:96: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-607904 --wait=true --memory=3072 --nodes=2 -v=5 --alsologtostderr --driver=kvm2 
E1123 08:25:59.016651   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:27:22.081545   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:96: (dbg) Done: out/minikube-linux-amd64 start -p multinode-607904 --wait=true --memory=3072 --nodes=2 -v=5 --alsologtostderr --driver=kvm2 : (2m0.952632917s)
multinode_test.go:102: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr
--- PASS: TestMultiNode/serial/FreshStart2Nodes (121.30s)

                                                
                                    
TestMultiNode/serial/DeployApp2Nodes (5.39s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeployApp2Nodes
multinode_test.go:493: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml
multinode_test.go:498: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- rollout status deployment/busybox
multinode_test.go:498: (dbg) Done: out/minikube-linux-amd64 kubectl -p multinode-607904 -- rollout status deployment/busybox: (3.647533123s)
multinode_test.go:505: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- get pods -o jsonpath='{.items[*].status.podIP}'
multinode_test.go:528: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:536: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-k4k69 -- nslookup kubernetes.io
multinode_test.go:536: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-tmf8m -- nslookup kubernetes.io
multinode_test.go:546: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-k4k69 -- nslookup kubernetes.default
multinode_test.go:546: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-tmf8m -- nslookup kubernetes.default
multinode_test.go:554: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-k4k69 -- nslookup kubernetes.default.svc.cluster.local
multinode_test.go:554: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-tmf8m -- nslookup kubernetes.default.svc.cluster.local
--- PASS: TestMultiNode/serial/DeployApp2Nodes (5.39s)

                                                
                                    
TestMultiNode/serial/PingHostFrom2Pods (0.9s)

                                                
                                                
=== RUN   TestMultiNode/serial/PingHostFrom2Pods
multinode_test.go:564: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- get pods -o jsonpath='{.items[*].metadata.name}'
multinode_test.go:572: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-k4k69 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-k4k69 -- sh -c "ping -c 1 192.168.39.1"
multinode_test.go:572: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-tmf8m -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3"
multinode_test.go:583: (dbg) Run:  out/minikube-linux-amd64 kubectl -p multinode-607904 -- exec busybox-7b57f96db7-tmf8m -- sh -c "ping -c 1 192.168.39.1"
--- PASS: TestMultiNode/serial/PingHostFrom2Pods (0.90s)

                                                
                                    
TestMultiNode/serial/AddNode (48.92s)

                                                
                                                
=== RUN   TestMultiNode/serial/AddNode
multinode_test.go:121: (dbg) Run:  out/minikube-linux-amd64 node add -p multinode-607904 -v=5 --alsologtostderr
multinode_test.go:121: (dbg) Done: out/minikube-linux-amd64 node add -p multinode-607904 -v=5 --alsologtostderr: (48.45880549s)
multinode_test.go:127: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr
--- PASS: TestMultiNode/serial/AddNode (48.92s)

                                                
                                    
TestMultiNode/serial/MultiNodeLabels (0.06s)

                                                
                                                
=== RUN   TestMultiNode/serial/MultiNodeLabels
multinode_test.go:221: (dbg) Run:  kubectl --context multinode-607904 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]"
--- PASS: TestMultiNode/serial/MultiNodeLabels (0.06s)

                                                
                                    
TestMultiNode/serial/ProfileList (0.47s)

                                                
                                                
=== RUN   TestMultiNode/serial/ProfileList
multinode_test.go:143: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestMultiNode/serial/ProfileList (0.47s)

                                                
                                    
TestMultiNode/serial/CopyFile (6.06s)

                                                
                                                
=== RUN   TestMultiNode/serial/CopyFile
multinode_test.go:184: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --output json --alsologtostderr
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp testdata/cp-test.txt multinode-607904:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile1901972782/001/cp-test_multinode-607904.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904:/home/docker/cp-test.txt multinode-607904-m02:/home/docker/cp-test_multinode-607904_multinode-607904-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m02 "sudo cat /home/docker/cp-test_multinode-607904_multinode-607904-m02.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904:/home/docker/cp-test.txt multinode-607904-m03:/home/docker/cp-test_multinode-607904_multinode-607904-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m03 "sudo cat /home/docker/cp-test_multinode-607904_multinode-607904-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp testdata/cp-test.txt multinode-607904-m02:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile1901972782/001/cp-test_multinode-607904-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904-m02:/home/docker/cp-test.txt multinode-607904:/home/docker/cp-test_multinode-607904-m02_multinode-607904.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904 "sudo cat /home/docker/cp-test_multinode-607904-m02_multinode-607904.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904-m02:/home/docker/cp-test.txt multinode-607904-m03:/home/docker/cp-test_multinode-607904-m02_multinode-607904-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m02 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m03 "sudo cat /home/docker/cp-test_multinode-607904-m02_multinode-607904-m03.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp testdata/cp-test.txt multinode-607904-m03:/home/docker/cp-test.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile1901972782/001/cp-test_multinode-607904-m03.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904-m03:/home/docker/cp-test.txt multinode-607904:/home/docker/cp-test_multinode-607904-m03_multinode-607904.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904 "sudo cat /home/docker/cp-test_multinode-607904-m03_multinode-607904.txt"
helpers_test.go:573: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 cp multinode-607904-m03:/home/docker/cp-test.txt multinode-607904-m02:/home/docker/cp-test_multinode-607904-m03_multinode-607904-m02.txt
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m03 "sudo cat /home/docker/cp-test.txt"
helpers_test.go:551: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 ssh -n multinode-607904-m02 "sudo cat /home/docker/cp-test_multinode-607904-m03_multinode-607904-m02.txt"
--- PASS: TestMultiNode/serial/CopyFile (6.06s)

                                                
                                    
TestMultiNode/serial/StopNode (2.48s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopNode
multinode_test.go:248: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 node stop m03
multinode_test.go:248: (dbg) Done: out/minikube-linux-amd64 -p multinode-607904 node stop m03: (1.791247237s)
multinode_test.go:254: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status
multinode_test.go:254: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-607904 status: exit status 7 (342.410556ms)

                                                
                                                
-- stdout --
	multinode-607904
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-607904-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-607904-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:261: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr
multinode_test.go:261: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr: exit status 7 (346.897322ms)

                                                
                                                
-- stdout --
	multinode-607904
	type: Control Plane
	host: Running
	kubelet: Running
	apiserver: Running
	kubeconfig: Configured
	
	multinode-607904-m02
	type: Worker
	host: Running
	kubelet: Running
	
	multinode-607904-m03
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1123 08:28:35.866896   39763 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:28:35.866994   39763 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:28:35.867001   39763 out.go:374] Setting ErrFile to fd 2...
	I1123 08:28:35.867005   39763 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:28:35.867183   39763 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:28:35.867359   39763 out.go:368] Setting JSON to false
	I1123 08:28:35.867386   39763 mustload.go:66] Loading cluster: multinode-607904
	I1123 08:28:35.867421   39763 notify.go:221] Checking for updates...
	I1123 08:28:35.867704   39763 config.go:182] Loaded profile config "multinode-607904": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:28:35.867720   39763 status.go:174] checking status of multinode-607904 ...
	I1123 08:28:35.869614   39763 status.go:371] multinode-607904 host status = "Running" (err=<nil>)
	I1123 08:28:35.869632   39763 host.go:66] Checking if "multinode-607904" exists ...
	I1123 08:28:35.871988   39763 main.go:143] libmachine: domain multinode-607904 has defined MAC address 52:54:00:81:60:9c in network mk-multinode-607904
	I1123 08:28:35.872476   39763 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:81:60:9c", ip: ""} in network mk-multinode-607904: {Iface:virbr1 ExpiryTime:2025-11-23 09:25:46 +0000 UTC Type:0 Mac:52:54:00:81:60:9c Iaid: IPaddr:192.168.39.253 Prefix:24 Hostname:multinode-607904 Clientid:01:52:54:00:81:60:9c}
	I1123 08:28:35.872501   39763 main.go:143] libmachine: domain multinode-607904 has defined IP address 192.168.39.253 and MAC address 52:54:00:81:60:9c in network mk-multinode-607904
	I1123 08:28:35.872634   39763 host.go:66] Checking if "multinode-607904" exists ...
	I1123 08:28:35.872810   39763 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1123 08:28:35.874825   39763 main.go:143] libmachine: domain multinode-607904 has defined MAC address 52:54:00:81:60:9c in network mk-multinode-607904
	I1123 08:28:35.875192   39763 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:81:60:9c", ip: ""} in network mk-multinode-607904: {Iface:virbr1 ExpiryTime:2025-11-23 09:25:46 +0000 UTC Type:0 Mac:52:54:00:81:60:9c Iaid: IPaddr:192.168.39.253 Prefix:24 Hostname:multinode-607904 Clientid:01:52:54:00:81:60:9c}
	I1123 08:28:35.875222   39763 main.go:143] libmachine: domain multinode-607904 has defined IP address 192.168.39.253 and MAC address 52:54:00:81:60:9c in network mk-multinode-607904
	I1123 08:28:35.875376   39763 sshutil.go:53] new ssh client: &{IP:192.168.39.253 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/multinode-607904/id_rsa Username:docker}
	I1123 08:28:35.962062   39763 ssh_runner.go:195] Run: systemctl --version
	I1123 08:28:35.970039   39763 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:28:35.989214   39763 kubeconfig.go:125] found "multinode-607904" server: "https://192.168.39.253:8443"
	I1123 08:28:35.989271   39763 api_server.go:166] Checking apiserver status ...
	I1123 08:28:35.989319   39763 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
	I1123 08:28:36.012298   39763 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2511/cgroup
	W1123 08:28:36.024906   39763 api_server.go:177] unable to find freezer cgroup: sudo egrep ^[0-9]+:freezer: /proc/2511/cgroup: Process exited with status 1
	stdout:
	
	stderr:
	I1123 08:28:36.024969   39763 ssh_runner.go:195] Run: ls
	I1123 08:28:36.030688   39763 api_server.go:253] Checking apiserver healthz at https://192.168.39.253:8443/healthz ...
	I1123 08:28:36.035573   39763 api_server.go:279] https://192.168.39.253:8443/healthz returned 200:
	ok
	I1123 08:28:36.035593   39763 status.go:463] multinode-607904 apiserver status = Running (err=<nil>)
	I1123 08:28:36.035601   39763 status.go:176] multinode-607904 status: &{Name:multinode-607904 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:28:36.035625   39763 status.go:174] checking status of multinode-607904-m02 ...
	I1123 08:28:36.037257   39763 status.go:371] multinode-607904-m02 host status = "Running" (err=<nil>)
	I1123 08:28:36.037280   39763 host.go:66] Checking if "multinode-607904-m02" exists ...
	I1123 08:28:36.040023   39763 main.go:143] libmachine: domain multinode-607904-m02 has defined MAC address 52:54:00:bc:2c:77 in network mk-multinode-607904
	I1123 08:28:36.040539   39763 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:bc:2c:77", ip: ""} in network mk-multinode-607904: {Iface:virbr1 ExpiryTime:2025-11-23 09:26:56 +0000 UTC Type:0 Mac:52:54:00:bc:2c:77 Iaid: IPaddr:192.168.39.203 Prefix:24 Hostname:multinode-607904-m02 Clientid:01:52:54:00:bc:2c:77}
	I1123 08:28:36.040574   39763 main.go:143] libmachine: domain multinode-607904-m02 has defined IP address 192.168.39.203 and MAC address 52:54:00:bc:2c:77 in network mk-multinode-607904
	I1123 08:28:36.040725   39763 host.go:66] Checking if "multinode-607904-m02" exists ...
	I1123 08:28:36.040995   39763 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
	I1123 08:28:36.043387   39763 main.go:143] libmachine: domain multinode-607904-m02 has defined MAC address 52:54:00:bc:2c:77 in network mk-multinode-607904
	I1123 08:28:36.043780   39763 main.go:143] libmachine: found host DHCP lease matching {name: "", mac: "52:54:00:bc:2c:77", ip: ""} in network mk-multinode-607904: {Iface:virbr1 ExpiryTime:2025-11-23 09:26:56 +0000 UTC Type:0 Mac:52:54:00:bc:2c:77 Iaid: IPaddr:192.168.39.203 Prefix:24 Hostname:multinode-607904-m02 Clientid:01:52:54:00:bc:2c:77}
	I1123 08:28:36.043810   39763 main.go:143] libmachine: domain multinode-607904-m02 has defined IP address 192.168.39.203 and MAC address 52:54:00:bc:2c:77 in network mk-multinode-607904
	I1123 08:28:36.043980   39763 sshutil.go:53] new ssh client: &{IP:192.168.39.203 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/21966-18241/.minikube/machines/multinode-607904-m02/id_rsa Username:docker}
	I1123 08:28:36.129874   39763 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
	I1123 08:28:36.147453   39763 status.go:176] multinode-607904-m02 status: &{Name:multinode-607904-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:28:36.147492   39763 status.go:174] checking status of multinode-607904-m03 ...
	I1123 08:28:36.149183   39763 status.go:371] multinode-607904-m03 host status = "Stopped" (err=<nil>)
	I1123 08:28:36.149203   39763 status.go:384] host is not running, skipping remaining checks
	I1123 08:28:36.149210   39763 status.go:176] multinode-607904-m03 status: &{Name:multinode-607904-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopNode (2.48s)

                                                
                                    
TestMultiNode/serial/StartAfterStop (41.41s)

                                                
                                                
=== RUN   TestMultiNode/serial/StartAfterStop
multinode_test.go:282: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 node start m03 -v=5 --alsologtostderr
E1123 08:29:07.497431   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:282: (dbg) Done: out/minikube-linux-amd64 -p multinode-607904 node start m03 -v=5 --alsologtostderr: (40.901348203s)
multinode_test.go:290: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status -v=5 --alsologtostderr
multinode_test.go:306: (dbg) Run:  kubectl get nodes
--- PASS: TestMultiNode/serial/StartAfterStop (41.41s)

                                                
                                    
TestMultiNode/serial/RestartKeepsNodes (179.86s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartKeepsNodes
multinode_test.go:314: (dbg) Run:  out/minikube-linux-amd64 node list -p multinode-607904
multinode_test.go:321: (dbg) Run:  out/minikube-linux-amd64 stop -p multinode-607904
multinode_test.go:321: (dbg) Done: out/minikube-linux-amd64 stop -p multinode-607904: (28.718080059s)
multinode_test.go:326: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-607904 --wait=true -v=5 --alsologtostderr
E1123 08:30:59.016483   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:326: (dbg) Done: out/minikube-linux-amd64 start -p multinode-607904 --wait=true -v=5 --alsologtostderr: (2m31.019769701s)
multinode_test.go:331: (dbg) Run:  out/minikube-linux-amd64 node list -p multinode-607904
--- PASS: TestMultiNode/serial/RestartKeepsNodes (179.86s)

                                                
                                    
TestMultiNode/serial/DeleteNode (2.14s)

                                                
                                                
=== RUN   TestMultiNode/serial/DeleteNode
multinode_test.go:416: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 node delete m03
multinode_test.go:416: (dbg) Done: out/minikube-linux-amd64 -p multinode-607904 node delete m03: (1.679520344s)
multinode_test.go:422: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr
multinode_test.go:436: (dbg) Run:  kubectl get nodes
multinode_test.go:444: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/DeleteNode (2.14s)

                                                
                                    
TestMultiNode/serial/StopMultiNode (26.43s)

                                                
                                                
=== RUN   TestMultiNode/serial/StopMultiNode
multinode_test.go:345: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 stop
multinode_test.go:345: (dbg) Done: out/minikube-linux-amd64 -p multinode-607904 stop: (26.305887277s)
multinode_test.go:351: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status
multinode_test.go:351: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-607904 status: exit status 7 (60.317631ms)

                                                
                                                
-- stdout --
	multinode-607904
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-607904-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
multinode_test.go:358: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr
multinode_test.go:358: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr: exit status 7 (62.389657ms)

                                                
                                                
-- stdout --
	multinode-607904
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	
	multinode-607904-m02
	type: Worker
	host: Stopped
	kubelet: Stopped
	

                                                
                                                
-- /stdout --
** stderr ** 
	I1123 08:32:45.987939   41199 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:32:45.988207   41199 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:32:45.988217   41199 out.go:374] Setting ErrFile to fd 2...
	I1123 08:32:45.988221   41199 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:32:45.988441   41199 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:32:45.988591   41199 out.go:368] Setting JSON to false
	I1123 08:32:45.988621   41199 mustload.go:66] Loading cluster: multinode-607904
	I1123 08:32:45.988703   41199 notify.go:221] Checking for updates...
	I1123 08:32:45.989155   41199 config.go:182] Loaded profile config "multinode-607904": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:32:45.989175   41199 status.go:174] checking status of multinode-607904 ...
	I1123 08:32:45.991288   41199 status.go:371] multinode-607904 host status = "Stopped" (err=<nil>)
	I1123 08:32:45.991305   41199 status.go:384] host is not running, skipping remaining checks
	I1123 08:32:45.991311   41199 status.go:176] multinode-607904 status: &{Name:multinode-607904 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:}
	I1123 08:32:45.991331   41199 status.go:174] checking status of multinode-607904-m02 ...
	I1123 08:32:45.992606   41199 status.go:371] multinode-607904-m02 host status = "Stopped" (err=<nil>)
	I1123 08:32:45.992622   41199 status.go:384] host is not running, skipping remaining checks
	I1123 08:32:45.992628   41199 status.go:176] multinode-607904-m02 status: &{Name:multinode-607904-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:}

                                                
                                                
** /stderr **
--- PASS: TestMultiNode/serial/StopMultiNode (26.43s)

                                                
                                    
TestMultiNode/serial/RestartMultiNode (109.71s)

                                                
                                                
=== RUN   TestMultiNode/serial/RestartMultiNode
multinode_test.go:376: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-607904 --wait=true -v=5 --alsologtostderr --driver=kvm2 
E1123 08:34:07.497729   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
multinode_test.go:376: (dbg) Done: out/minikube-linux-amd64 start -p multinode-607904 --wait=true -v=5 --alsologtostderr --driver=kvm2 : (1m49.238558387s)
multinode_test.go:382: (dbg) Run:  out/minikube-linux-amd64 -p multinode-607904 status --alsologtostderr
multinode_test.go:396: (dbg) Run:  kubectl get nodes
multinode_test.go:404: (dbg) Run:  kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
--- PASS: TestMultiNode/serial/RestartMultiNode (109.71s)

                                                
                                    
TestMultiNode/serial/ValidateNameConflict (46.24s)

                                                
                                                
=== RUN   TestMultiNode/serial/ValidateNameConflict
multinode_test.go:455: (dbg) Run:  out/minikube-linux-amd64 node list -p multinode-607904
multinode_test.go:464: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-607904-m02 --driver=kvm2 
multinode_test.go:464: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p multinode-607904-m02 --driver=kvm2 : exit status 14 (77.367348ms)

                                                
                                                
-- stdout --
	* [multinode-607904-m02] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=21966
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	! Profile name 'multinode-607904-m02' is duplicated with machine name 'multinode-607904-m02' in profile 'multinode-607904'
	X Exiting due to MK_USAGE: Profile name should be unique

                                                
                                                
** /stderr **
multinode_test.go:472: (dbg) Run:  out/minikube-linux-amd64 start -p multinode-607904-m03 --driver=kvm2 
multinode_test.go:472: (dbg) Done: out/minikube-linux-amd64 start -p multinode-607904-m03 --driver=kvm2 : (45.076712631s)
multinode_test.go:479: (dbg) Run:  out/minikube-linux-amd64 node add -p multinode-607904
multinode_test.go:479: (dbg) Non-zero exit: out/minikube-linux-amd64 node add -p multinode-607904: exit status 80 (210.974829ms)

                                                
                                                
-- stdout --
	* Adding node m03 to cluster multinode-607904 as [worker]
	
	

                                                
                                                
-- /stdout --
** stderr ** 
	X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-607904-m03 already exists in multinode-607904-m03 profile
	* 
	╭─────────────────────────────────────────────────────────────────────────────────────────────╮
	│                                                                                             │
	│    * If the above advice does not help, please let us know:                                 │
	│      https://github.com/kubernetes/minikube/issues/new/choose                               │
	│                                                                                             │
	│    * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue.    │
	│    * Please also attach the following file to the GitHub issue:                             │
	│    * - /tmp/minikube_node_040ea7097fd6ed71e65be9a474587f81f0ccd21d_0.log                    │
	│                                                                                             │
	╰─────────────────────────────────────────────────────────────────────────────────────────────╯

                                                
                                                
** /stderr **
multinode_test.go:484: (dbg) Run:  out/minikube-linux-amd64 delete -p multinode-607904-m03
--- PASS: TestMultiNode/serial/ValidateNameConflict (46.24s)

                                                
                                    
TestPreload (160.23s)

                                                
                                                
=== RUN   TestPreload
preload_test.go:43: (dbg) Run:  out/minikube-linux-amd64 start -p test-preload-779930 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.32.0
E1123 08:35:59.015877   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
preload_test.go:43: (dbg) Done: out/minikube-linux-amd64 start -p test-preload-779930 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.32.0: (1m34.724286202s)
preload_test.go:51: (dbg) Run:  out/minikube-linux-amd64 -p test-preload-779930 image pull gcr.io/k8s-minikube/busybox
preload_test.go:51: (dbg) Done: out/minikube-linux-amd64 -p test-preload-779930 image pull gcr.io/k8s-minikube/busybox: (2.102757604s)
preload_test.go:57: (dbg) Run:  out/minikube-linux-amd64 stop -p test-preload-779930
E1123 08:37:10.569969   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
preload_test.go:57: (dbg) Done: out/minikube-linux-amd64 stop -p test-preload-779930: (13.713286407s)
preload_test.go:65: (dbg) Run:  out/minikube-linux-amd64 start -p test-preload-779930 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=kvm2 
preload_test.go:65: (dbg) Done: out/minikube-linux-amd64 start -p test-preload-779930 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=kvm2 : (48.67741414s)
preload_test.go:70: (dbg) Run:  out/minikube-linux-amd64 -p test-preload-779930 image list
helpers_test.go:175: Cleaning up "test-preload-779930" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p test-preload-779930
--- PASS: TestPreload (160.23s)

                                                
                                    
TestScheduledStopUnix (116.15s)

                                                
                                                
=== RUN   TestScheduledStopUnix
scheduled_stop_test.go:128: (dbg) Run:  out/minikube-linux-amd64 start -p scheduled-stop-655746 --memory=3072 --driver=kvm2 
scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-amd64 start -p scheduled-stop-655746 --memory=3072 --driver=kvm2 : (44.460615355s)
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-655746 --schedule 5m -v=5 --alsologtostderr
minikube stop output:

                                                
                                                
** stderr ** 
	I1123 08:38:48.183808   43554 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:38:48.183898   43554 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:38:48.183902   43554 out.go:374] Setting ErrFile to fd 2...
	I1123 08:38:48.183906   43554 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:38:48.184071   43554 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:38:48.184315   43554 out.go:368] Setting JSON to false
	I1123 08:38:48.184400   43554 mustload.go:66] Loading cluster: scheduled-stop-655746
	I1123 08:38:48.184728   43554 config.go:182] Loaded profile config "scheduled-stop-655746": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:38:48.184798   43554 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/config.json ...
	I1123 08:38:48.184963   43554 mustload.go:66] Loading cluster: scheduled-stop-655746
	I1123 08:38:48.185055   43554 config.go:182] Loaded profile config "scheduled-stop-655746": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1

                                                
                                                
** /stderr **
scheduled_stop_test.go:204: (dbg) Run:  out/minikube-linux-amd64 status --format={{.TimeToStop}} -p scheduled-stop-655746 -n scheduled-stop-655746
scheduled_stop_test.go:172: signal error was:  <nil>
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-655746 --schedule 15s -v=5 --alsologtostderr
minikube stop output:

                                                
                                                
** stderr ** 
	I1123 08:38:48.494783   43600 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:38:48.495303   43600 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:38:48.495318   43600 out.go:374] Setting ErrFile to fd 2...
	I1123 08:38:48.495325   43600 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:38:48.495796   43600 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:38:48.496347   43600 out.go:368] Setting JSON to false
	I1123 08:38:48.496591   43600 daemonize_unix.go:73] killing process 43589 as it is an old scheduled stop
	I1123 08:38:48.496708   43600 mustload.go:66] Loading cluster: scheduled-stop-655746
	I1123 08:38:48.497201   43600 config.go:182] Loaded profile config "scheduled-stop-655746": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:38:48.497311   43600 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/config.json ...
	I1123 08:38:48.497512   43600 mustload.go:66] Loading cluster: scheduled-stop-655746
	I1123 08:38:48.497648   43600 config.go:182] Loaded profile config "scheduled-stop-655746": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1

                                                
                                                
** /stderr **
scheduled_stop_test.go:172: signal error was:  os: process already finished
I1123 08:38:48.502590   22148 retry.go:31] will retry after 59.34µs: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.503748   22148 retry.go:31] will retry after 97.479µs: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.504890   22148 retry.go:31] will retry after 287.008µs: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.506032   22148 retry.go:31] will retry after 277.193µs: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.507154   22148 retry.go:31] will retry after 540.709µs: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.508283   22148 retry.go:31] will retry after 688.125µs: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.509410   22148 retry.go:31] will retry after 1.169236ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.511614   22148 retry.go:31] will retry after 1.476129ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.513806   22148 retry.go:31] will retry after 3.645639ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.518024   22148 retry.go:31] will retry after 4.98111ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.523272   22148 retry.go:31] will retry after 5.483093ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.529504   22148 retry.go:31] will retry after 10.929791ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.540742   22148 retry.go:31] will retry after 8.275468ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.550024   22148 retry.go:31] will retry after 25.342397ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.576306   22148 retry.go:31] will retry after 30.424439ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
I1123 08:38:48.607564   22148 retry.go:31] will retry after 62.583328ms: open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/pid: no such file or directory
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-655746 --cancel-scheduled
minikube stop output:

                                                
                                                
-- stdout --
	* All existing scheduled stops cancelled

                                                
                                                
-- /stdout --
E1123 08:39:07.504941   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
scheduled_stop_test.go:189: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-655746 -n scheduled-stop-655746
scheduled_stop_test.go:218: (dbg) Run:  out/minikube-linux-amd64 status -p scheduled-stop-655746
scheduled_stop_test.go:137: (dbg) Run:  out/minikube-linux-amd64 stop -p scheduled-stop-655746 --schedule 15s -v=5 --alsologtostderr
minikube stop output:

                                                
                                                
** stderr ** 
	I1123 08:39:14.250120   43749 out.go:360] Setting OutFile to fd 1 ...
	I1123 08:39:14.250269   43749 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:39:14.250278   43749 out.go:374] Setting ErrFile to fd 2...
	I1123 08:39:14.250283   43749 out.go:408] TERM=,COLORTERM=, which probably does not support color
	I1123 08:39:14.250508   43749 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21966-18241/.minikube/bin
	I1123 08:39:14.250731   43749 out.go:368] Setting JSON to false
	I1123 08:39:14.250812   43749 mustload.go:66] Loading cluster: scheduled-stop-655746
	I1123 08:39:14.251116   43749 config.go:182] Loaded profile config "scheduled-stop-655746": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
	I1123 08:39:14.251189   43749 profile.go:143] Saving config to /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/scheduled-stop-655746/config.json ...
	I1123 08:39:14.251386   43749 mustload.go:66] Loading cluster: scheduled-stop-655746
	I1123 08:39:14.251483   43749 config.go:182] Loaded profile config "scheduled-stop-655746": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1

                                                
                                                
** /stderr **
scheduled_stop_test.go:172: signal error was:  os: process already finished
scheduled_stop_test.go:218: (dbg) Run:  out/minikube-linux-amd64 status -p scheduled-stop-655746
scheduled_stop_test.go:218: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p scheduled-stop-655746: exit status 7 (61.039231ms)

                                                
                                                
-- stdout --
	scheduled-stop-655746
	type: Control Plane
	host: Stopped
	kubelet: Stopped
	apiserver: Stopped
	kubeconfig: Stopped
	

                                                
                                                
-- /stdout --
scheduled_stop_test.go:189: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-655746 -n scheduled-stop-655746
scheduled_stop_test.go:189: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-655746 -n scheduled-stop-655746: exit status 7 (59.790111ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
scheduled_stop_test.go:189: status error: exit status 7 (may be ok)
helpers_test.go:175: Cleaning up "scheduled-stop-655746" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p scheduled-stop-655746
--- PASS: TestScheduledStopUnix (116.15s)
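
Stripped of the pid-file retry noise, the scheduled-stop flags exercised above boil down to three invocations, all taken verbatim from the runs logged in this test:

	out/minikube-linux-amd64 stop -p scheduled-stop-655746 --schedule 5m          # arm a stop five minutes out
	out/minikube-linux-amd64 stop -p scheduled-stop-655746 --schedule 15s         # re-arm; the previous scheduled-stop process is killed
	out/minikube-linux-amd64 stop -p scheduled-stop-655746 --cancel-scheduled     # cancel any pending scheduled stop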

                                                
                                    
TestSkaffold (130.94s)

                                                
                                                
=== RUN   TestSkaffold
skaffold_test.go:59: (dbg) Run:  /tmp/skaffold.exe1974011572 version
skaffold_test.go:63: skaffold version: v2.17.0
skaffold_test.go:66: (dbg) Run:  out/minikube-linux-amd64 start -p skaffold-153557 --memory=3072 --driver=kvm2 
skaffold_test.go:66: (dbg) Done: out/minikube-linux-amd64 start -p skaffold-153557 --memory=3072 --driver=kvm2 : (46.061504671s)
skaffold_test.go:86: copying out/minikube-linux-amd64 to /home/jenkins/workspace/KVM_Linux_integration/out/minikube
skaffold_test.go:105: (dbg) Run:  /tmp/skaffold.exe1974011572 run --minikube-profile skaffold-153557 --kube-context skaffold-153557 --status-check=true --port-forward=false --interactive=false
E1123 08:40:59.015797   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
skaffold_test.go:105: (dbg) Done: /tmp/skaffold.exe1974011572 run --minikube-profile skaffold-153557 --kube-context skaffold-153557 --status-check=true --port-forward=false --interactive=false: (1m12.066876241s)
skaffold_test.go:111: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ...
helpers_test.go:352: "leeroy-app-79dcfc7c5d-tsjwf" [326045dc-e626-4183-895d-75ee7eb23690] Running
skaffold_test.go:111: (dbg) TestSkaffold: app=leeroy-app healthy within 6.007138682s
skaffold_test.go:114: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-web" in namespace "default" ...
helpers_test.go:352: "leeroy-web-747bb8df47-hmk4k" [68249c3a-6ef6-4b60-891c-28ebcbffe4b3] Running
skaffold_test.go:114: (dbg) TestSkaffold: app=leeroy-web healthy within 5.00604736s
helpers_test.go:175: Cleaning up "skaffold-153557" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p skaffold-153557
--- PASS: TestSkaffold (130.94s)

                                                
                                    
TestRunningBinaryUpgrade (175.85s)

                                                
                                                
=== RUN   TestRunningBinaryUpgrade
=== PAUSE TestRunningBinaryUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestRunningBinaryUpgrade
version_upgrade_test.go:120: (dbg) Run:  /tmp/minikube-v1.32.0.1035874392 start -p running-upgrade-134603 --memory=3072 --vm-driver=kvm2 
version_upgrade_test.go:120: (dbg) Done: /tmp/minikube-v1.32.0.1035874392 start -p running-upgrade-134603 --memory=3072 --vm-driver=kvm2 : (1m52.999164041s)
version_upgrade_test.go:130: (dbg) Run:  out/minikube-linux-amd64 start -p running-upgrade-134603 --memory=3072 --alsologtostderr -v=1 --driver=kvm2 
E1123 08:44:07.498121   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:130: (dbg) Done: out/minikube-linux-amd64 start -p running-upgrade-134603 --memory=3072 --alsologtostderr -v=1 --driver=kvm2 : (1m1.422019203s)
helpers_test.go:175: Cleaning up "running-upgrade-134603" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p running-upgrade-134603
--- PASS: TestRunningBinaryUpgrade (175.85s)

                                                
                                    
TestKubernetesUpgrade (220s)

                                                
                                                
=== RUN   TestKubernetesUpgrade
=== PAUSE TestKubernetesUpgrade

                                                
                                                

                                                
                                                
=== CONT  TestKubernetesUpgrade
version_upgrade_test.go:222: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=kvm2 
version_upgrade_test.go:222: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=kvm2 : (48.47243142s)
version_upgrade_test.go:227: (dbg) Run:  out/minikube-linux-amd64 stop -p kubernetes-upgrade-390801
version_upgrade_test.go:227: (dbg) Done: out/minikube-linux-amd64 stop -p kubernetes-upgrade-390801: (14.572275566s)
version_upgrade_test.go:232: (dbg) Run:  out/minikube-linux-amd64 -p kubernetes-upgrade-390801 status --format={{.Host}}
version_upgrade_test.go:232: (dbg) Non-zero exit: out/minikube-linux-amd64 -p kubernetes-upgrade-390801 status --format={{.Host}}: exit status 7 (76.324683ms)
-- stdout --
	Stopped
-- /stdout --
version_upgrade_test.go:234: status error: exit status 7 (may be ok)
version_upgrade_test.go:243: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=kvm2 
version_upgrade_test.go:243: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=kvm2 : (1m2.338463121s)
version_upgrade_test.go:248: (dbg) Run:  kubectl --context kubernetes-upgrade-390801 version --output=json
version_upgrade_test.go:267: Attempting to downgrade Kubernetes (should fail)
version_upgrade_test.go:269: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.28.0 --driver=kvm2 
version_upgrade_test.go:269: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.28.0 --driver=kvm2 : exit status 106 (93.633861ms)
-- stdout --
	* [kubernetes-upgrade-390801] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=21966
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	
-- /stdout --
** stderr ** 
	X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.34.1 cluster to v1.28.0
	* Suggestion: 
	
	    1) Recreate the cluster with Kubernetes 1.28.0, by running:
	    
	    minikube delete -p kubernetes-upgrade-390801
	    minikube start -p kubernetes-upgrade-390801 --kubernetes-version=v1.28.0
	    
	    2) Create a second cluster with Kubernetes 1.28.0, by running:
	    
	    minikube start -p kubernetes-upgrade-3908012 --kubernetes-version=v1.28.0
	    
	    3) Use the existing cluster at version Kubernetes 1.34.1, by running:
	    
	    minikube start -p kubernetes-upgrade-390801 --kubernetes-version=v1.34.1
	    
** /stderr **
version_upgrade_test.go:273: Attempting restart after unsuccessful downgrade
version_upgrade_test.go:275: (dbg) Run:  out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=kvm2 
version_upgrade_test.go:275: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-390801 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=kvm2 : (1m33.135222591s)
helpers_test.go:175: Cleaning up "kubernetes-upgrade-390801" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p kubernetes-upgrade-390801
helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p kubernetes-upgrade-390801: (1.254902162s)
--- PASS: TestKubernetesUpgrade (220.00s)
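
For reference, the upgrade-then-blocked-downgrade sequence exercised above can be replayed by hand with the same flags; a minimal sketch, assuming a scratch profile name (k8s-upgrade-demo is illustrative, not from this run):

    # bring the cluster up on the old version, then stop it
    minikube start -p k8s-upgrade-demo --memory=3072 --kubernetes-version=v1.28.0 --driver=kvm2
    minikube stop -p k8s-upgrade-demo
    # restarting on a newer version performs the in-place upgrade
    minikube start -p k8s-upgrade-demo --memory=3072 --kubernetes-version=v1.34.1 --driver=kvm2
    # asking for an older version again is refused with K8S_DOWNGRADE_UNSUPPORTED (exit status 106)
    minikube start -p k8s-upgrade-demo --memory=3072 --kubernetes-version=v1.28.0 --driver=kvm2 \
      || echo "downgrade refused, as expected"
    minikube delete -p k8s-upgrade-demo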

TestISOImage/Setup (62.62s)
=== RUN   TestISOImage/Setup
iso_test.go:47: (dbg) Run:  out/minikube-linux-amd64 start -p guest-773058 --no-kubernetes --driver=kvm2 
iso_test.go:47: (dbg) Done: out/minikube-linux-amd64 start -p guest-773058 --no-kubernetes --driver=kvm2 : (1m2.622093803s)
--- PASS: TestISOImage/Setup (62.62s)

TestISOImage/Binaries/crictl (0.22s)
=== RUN   TestISOImage/Binaries/crictl
=== PAUSE TestISOImage/Binaries/crictl
=== CONT  TestISOImage/Binaries/crictl
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which crictl"
--- PASS: TestISOImage/Binaries/crictl (0.22s)

TestISOImage/Binaries/curl (0.19s)
=== RUN   TestISOImage/Binaries/curl
=== PAUSE TestISOImage/Binaries/curl
=== CONT  TestISOImage/Binaries/curl
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which curl"
--- PASS: TestISOImage/Binaries/curl (0.19s)

TestISOImage/Binaries/docker (0.19s)
=== RUN   TestISOImage/Binaries/docker
=== PAUSE TestISOImage/Binaries/docker
=== CONT  TestISOImage/Binaries/docker
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which docker"
--- PASS: TestISOImage/Binaries/docker (0.19s)

TestISOImage/Binaries/git (0.2s)
=== RUN   TestISOImage/Binaries/git
=== PAUSE TestISOImage/Binaries/git
=== CONT  TestISOImage/Binaries/git
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which git"
--- PASS: TestISOImage/Binaries/git (0.20s)

TestISOImage/Binaries/iptables (0.21s)
=== RUN   TestISOImage/Binaries/iptables
=== PAUSE TestISOImage/Binaries/iptables
=== CONT  TestISOImage/Binaries/iptables
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which iptables"
--- PASS: TestISOImage/Binaries/iptables (0.21s)

TestISOImage/Binaries/podman (0.2s)
=== RUN   TestISOImage/Binaries/podman
=== PAUSE TestISOImage/Binaries/podman
=== CONT  TestISOImage/Binaries/podman
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which podman"
--- PASS: TestISOImage/Binaries/podman (0.20s)

TestISOImage/Binaries/rsync (0.2s)
=== RUN   TestISOImage/Binaries/rsync
=== PAUSE TestISOImage/Binaries/rsync
=== CONT  TestISOImage/Binaries/rsync
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which rsync"
--- PASS: TestISOImage/Binaries/rsync (0.20s)

TestISOImage/Binaries/socat (0.2s)
=== RUN   TestISOImage/Binaries/socat
=== PAUSE TestISOImage/Binaries/socat
=== CONT  TestISOImage/Binaries/socat
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which socat"
E1123 08:53:50.571663   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
--- PASS: TestISOImage/Binaries/socat (0.20s)

TestISOImage/Binaries/wget (0.19s)
=== RUN   TestISOImage/Binaries/wget
=== PAUSE TestISOImage/Binaries/wget
=== CONT  TestISOImage/Binaries/wget
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which wget"
--- PASS: TestISOImage/Binaries/wget (0.19s)

TestISOImage/Binaries/VBoxControl (0.21s)
=== RUN   TestISOImage/Binaries/VBoxControl
=== PAUSE TestISOImage/Binaries/VBoxControl
=== CONT  TestISOImage/Binaries/VBoxControl
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which VBoxControl"
--- PASS: TestISOImage/Binaries/VBoxControl (0.21s)

TestISOImage/Binaries/VBoxService (0.2s)
=== RUN   TestISOImage/Binaries/VBoxService
=== PAUSE TestISOImage/Binaries/VBoxService
=== CONT  TestISOImage/Binaries/VBoxService
iso_test.go:76: (dbg) Run:  out/minikube-linux-amd64 -p guest-773058 ssh "which VBoxService"
--- PASS: TestISOImage/Binaries/VBoxService (0.20s)
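
Each Binaries check above is a single `which` lookup over SSH inside the guest; the same probe can be run as one loop, assuming the guest-773058 profile from Setup is still up:

    # confirm the expected userland tools ship in the ISO
    for bin in crictl curl docker git iptables podman rsync socat wget VBoxControl VBoxService; do
      out/minikube-linux-amd64 -p guest-773058 ssh "which $bin" || echo "missing: $bin"
    done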

TestStoppedBinaryUpgrade/Setup (0.46s)
=== RUN   TestStoppedBinaryUpgrade/Setup
--- PASS: TestStoppedBinaryUpgrade/Setup (0.46s)

TestStoppedBinaryUpgrade/Upgrade (131.83s)
=== RUN   TestStoppedBinaryUpgrade/Upgrade
version_upgrade_test.go:183: (dbg) Run:  /tmp/minikube-v1.32.0.2157562375 start -p stopped-upgrade-419084 --memory=3072 --vm-driver=kvm2 
E1123 08:44:02.083538   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
version_upgrade_test.go:183: (dbg) Done: /tmp/minikube-v1.32.0.2157562375 start -p stopped-upgrade-419084 --memory=3072 --vm-driver=kvm2 : (1m24.42142299s)
version_upgrade_test.go:192: (dbg) Run:  /tmp/minikube-v1.32.0.2157562375 -p stopped-upgrade-419084 stop
version_upgrade_test.go:192: (dbg) Done: /tmp/minikube-v1.32.0.2157562375 -p stopped-upgrade-419084 stop: (3.432421124s)
version_upgrade_test.go:198: (dbg) Run:  out/minikube-linux-amd64 start -p stopped-upgrade-419084 --memory=3072 --alsologtostderr -v=1 --driver=kvm2 
version_upgrade_test.go:198: (dbg) Done: out/minikube-linux-amd64 start -p stopped-upgrade-419084 --memory=3072 --alsologtostderr -v=1 --driver=kvm2 : (43.972617654s)
--- PASS: TestStoppedBinaryUpgrade/Upgrade (131.83s)
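
The stopped-binary upgrade above is: provision with the previous release, stop, then start the same profile with the current binary. A minimal sketch, assuming the old release has been downloaded to ./minikube-v1.32.0 and a throwaway profile name (both illustrative):

    # create and stop the cluster with the previous release
    ./minikube-v1.32.0 start -p stopped-upgrade-demo --memory=3072 --vm-driver=kvm2
    ./minikube-v1.32.0 -p stopped-upgrade-demo stop
    # the current binary adopts the stopped cluster and upgrades it on start
    out/minikube-linux-amd64 start -p stopped-upgrade-demo --memory=3072 --driver=kvm2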

TestStoppedBinaryUpgrade/MinikubeLogs (0.99s)
=== RUN   TestStoppedBinaryUpgrade/MinikubeLogs
version_upgrade_test.go:206: (dbg) Run:  out/minikube-linux-amd64 logs -p stopped-upgrade-419084
--- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (0.99s)

TestPause/serial/Start (95.83s)
=== RUN   TestPause/serial/Start
pause_test.go:80: (dbg) Run:  out/minikube-linux-amd64 start -p pause-005751 --memory=3072 --install-addons=false --wait=all --driver=kvm2 
pause_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -p pause-005751 --memory=3072 --install-addons=false --wait=all --driver=kvm2 : (1m35.82654983s)
--- PASS: TestPause/serial/Start (95.83s)

TestNoKubernetes/serial/StartNoK8sWithVersion (0.1s)
=== RUN   TestNoKubernetes/serial/StartNoK8sWithVersion
no_kubernetes_test.go:108: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --kubernetes-version=v1.28.0 --driver=kvm2 
no_kubernetes_test.go:108: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --kubernetes-version=v1.28.0 --driver=kvm2 : exit status 14 (95.598056ms)
-- stdout --
	* [NoKubernetes-106041] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
	  - MINIKUBE_LOCATION=21966
	  - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
	  - KUBECONFIG=/home/jenkins/minikube-integration/21966-18241/kubeconfig
	  - MINIKUBE_HOME=/home/jenkins/minikube-integration/21966-18241/.minikube
	  - MINIKUBE_BIN=out/minikube-linux-amd64
	  - MINIKUBE_FORCE_SYSTEMD=
	
	
-- /stdout --
** stderr ** 
	X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes,
	to unset a global config run:
	
	$ minikube config unset kubernetes-version
** /stderr **
--- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.10s)
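
The usage check above asserts that --no-kubernetes and --kubernetes-version are mutually exclusive (exit status 14). If a version is pinned in the global config, the suggestion printed in the stderr block clears it:

    # conflicting flags are refused with MK_USAGE
    out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --kubernetes-version=v1.28.0 --driver=kvm2; echo "exit=$?"
    # drop a globally configured version so --no-kubernetes can be used on its own
    minikube config unset kubernetes-version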

TestNoKubernetes/serial/StartWithK8s (68.12s)
=== RUN   TestNoKubernetes/serial/StartWithK8s
no_kubernetes_test.go:120: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-106041 --memory=3072 --alsologtostderr -v=5 --driver=kvm2 
E1123 08:47:39.706673   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
no_kubernetes_test.go:120: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-106041 --memory=3072 --alsologtostderr -v=5 --driver=kvm2 : (1m7.849423263s)
no_kubernetes_test.go:225: (dbg) Run:  out/minikube-linux-amd64 -p NoKubernetes-106041 status -o json
--- PASS: TestNoKubernetes/serial/StartWithK8s (68.12s)

TestNetworkPlugins/group/auto/Start (80.22s)
=== RUN   TestNetworkPlugins/group/auto/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p auto-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=kvm2 
E1123 08:48:20.668950   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p auto-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=kvm2 : (1m20.21502695s)
--- PASS: TestNetworkPlugins/group/auto/Start (80.22s)

TestNoKubernetes/serial/StartWithStopK8s (15.89s)
=== RUN   TestNoKubernetes/serial/StartWithStopK8s
no_kubernetes_test.go:137: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=kvm2 
no_kubernetes_test.go:137: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=kvm2 : (14.748381354s)
no_kubernetes_test.go:225: (dbg) Run:  out/minikube-linux-amd64 -p NoKubernetes-106041 status -o json
no_kubernetes_test.go:225: (dbg) Non-zero exit: out/minikube-linux-amd64 -p NoKubernetes-106041 status -o json: exit status 2 (236.581565ms)
-- stdout --
	{"Name":"NoKubernetes-106041","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false}
-- /stdout --
no_kubernetes_test.go:149: (dbg) Run:  out/minikube-linux-amd64 delete -p NoKubernetes-106041
--- PASS: TestNoKubernetes/serial/StartWithStopK8s (15.89s)
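
The JSON above is what the test keys off: host running with kubelet and apiserver stopped yields exit status 2. The same check by hand, assuming jq is available (the test itself does not use it):

    out/minikube-linux-amd64 -p NoKubernetes-106041 status -o json; echo "exit=$?"
    out/minikube-linux-amd64 -p NoKubernetes-106041 status -o json | jq -r '.Host, .Kubelet, .APIServer'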

TestPause/serial/SecondStartNoReconfiguration (62.54s)
=== RUN   TestPause/serial/SecondStartNoReconfiguration
pause_test.go:92: (dbg) Run:  out/minikube-linux-amd64 start -p pause-005751 --alsologtostderr -v=1 --driver=kvm2 
pause_test.go:92: (dbg) Done: out/minikube-linux-amd64 start -p pause-005751 --alsologtostderr -v=1 --driver=kvm2 : (1m2.513069998s)
--- PASS: TestPause/serial/SecondStartNoReconfiguration (62.54s)

TestNoKubernetes/serial/Start (25.32s)
=== RUN   TestNoKubernetes/serial/Start
no_kubernetes_test.go:161: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=kvm2 
E1123 08:49:07.497714   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/addons-085189/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
no_kubernetes_test.go:161: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-106041 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=kvm2 : (25.324258716s)
--- PASS: TestNoKubernetes/serial/Start (25.32s)

TestNetworkPlugins/group/auto/KubeletFlags (0.17s)
=== RUN   TestNetworkPlugins/group/auto/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p auto-564164 "pgrep -a kubelet"
I1123 08:49:21.093135   22148 config.go:182] Loaded profile config "auto-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.17s)

TestNetworkPlugins/group/auto/NetCatPod (11.25s)
=== RUN   TestNetworkPlugins/group/auto/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context auto-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-dh92l" [38f47dff-8ad4-4ca7-9815-8378622fadbf] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-dh92l" [38f47dff-8ad4-4ca7-9815-8378622fadbf] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 11.005207857s
--- PASS: TestNetworkPlugins/group/auto/NetCatPod (11.25s)

TestNoKubernetes/serial/VerifyNok8sNoK8sDownloads (0s)
=== RUN   TestNoKubernetes/serial/VerifyNok8sNoK8sDownloads
no_kubernetes_test.go:89: Checking cache directory: /home/jenkins/minikube-integration/21966-18241/.minikube/cache/linux/amd64/v0.0.0
--- PASS: TestNoKubernetes/serial/VerifyNok8sNoK8sDownloads (0.00s)

TestNoKubernetes/serial/VerifyK8sNotRunning (0.18s)
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunning
no_kubernetes_test.go:172: (dbg) Run:  out/minikube-linux-amd64 ssh -p NoKubernetes-106041 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:172: (dbg) Non-zero exit: out/minikube-linux-amd64 ssh -p NoKubernetes-106041 "sudo systemctl is-active --quiet service kubelet": exit status 1 (175.046844ms)
** stderr ** 
	ssh: Process exited with status 4
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.18s)
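
The assertion above is just systemctl over SSH: any non-zero exit means the kubelet unit is not active. A sketch of the same check:

    if out/minikube-linux-amd64 ssh -p NoKubernetes-106041 "sudo systemctl is-active --quiet service kubelet"; then
      echo "unexpected: kubelet is active"
    else
      echo "kubelet not running, as expected"
    fi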

TestNoKubernetes/serial/ProfileList (34.47s)
=== RUN   TestNoKubernetes/serial/ProfileList
no_kubernetes_test.go:194: (dbg) Run:  out/minikube-linux-amd64 profile list
no_kubernetes_test.go:194: (dbg) Done: out/minikube-linux-amd64 profile list: (18.824290806s)
no_kubernetes_test.go:204: (dbg) Run:  out/minikube-linux-amd64 profile list --output=json
no_kubernetes_test.go:204: (dbg) Done: out/minikube-linux-amd64 profile list --output=json: (15.646271446s)
--- PASS: TestNoKubernetes/serial/ProfileList (34.47s)

TestNetworkPlugins/group/auto/DNS (0.18s)
=== RUN   TestNetworkPlugins/group/auto/DNS
net_test.go:175: (dbg) Run:  kubectl --context auto-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/auto/DNS (0.18s)

TestNetworkPlugins/group/auto/Localhost (0.14s)
=== RUN   TestNetworkPlugins/group/auto/Localhost
net_test.go:194: (dbg) Run:  kubectl --context auto-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/auto/Localhost (0.14s)

TestNetworkPlugins/group/auto/HairPin (0.13s)
=== RUN   TestNetworkPlugins/group/auto/HairPin
net_test.go:264: (dbg) Run:  kubectl --context auto-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/auto/HairPin (0.13s)
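
The DNS, Localhost, and HairPin checks above all go through the same netcat deployment; the probes can be issued directly against the auto-564164 context (or any other plugin profile in this group):

    # in-cluster DNS resolution
    kubectl --context auto-564164 exec deployment/netcat -- nslookup kubernetes.default
    # localhost reachability, then hairpin (the pod reaching its own service)
    kubectl --context auto-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
    kubectl --context auto-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"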

TestNetworkPlugins/group/kindnet/Start (75.59s)
=== RUN   TestNetworkPlugins/group/kindnet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p kindnet-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=kvm2 
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p kindnet-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=kvm2 : (1m15.592704676s)
--- PASS: TestNetworkPlugins/group/kindnet/Start (75.59s)

TestPause/serial/Pause (0.7s)
=== RUN   TestPause/serial/Pause
pause_test.go:110: (dbg) Run:  out/minikube-linux-amd64 pause -p pause-005751 --alsologtostderr -v=5
--- PASS: TestPause/serial/Pause (0.70s)

TestPause/serial/VerifyStatus (0.29s)
=== RUN   TestPause/serial/VerifyStatus
status_test.go:76: (dbg) Run:  out/minikube-linux-amd64 status -p pause-005751 --output=json --layout=cluster
status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p pause-005751 --output=json --layout=cluster: exit status 2 (289.565934ms)
-- stdout --
	{"Name":"pause-005751","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 12 containers in: kube-system, kubernetes-dashboard, istio-operator","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-005751","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]}
-- /stdout --
--- PASS: TestPause/serial/VerifyStatus (0.29s)
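
In the cluster layout, a paused profile reports StatusName "Paused" (code 418) for the node and apiserver while the command itself exits 2; a sketch of reading that field, assuming jq is available (not used by the test):

    out/minikube-linux-amd64 status -p pause-005751 --output=json --layout=cluster | jq -r '.StatusName'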

TestPause/serial/Unpause (0.73s)
=== RUN   TestPause/serial/Unpause
pause_test.go:121: (dbg) Run:  out/minikube-linux-amd64 unpause -p pause-005751 --alsologtostderr -v=5
--- PASS: TestPause/serial/Unpause (0.73s)

TestNoKubernetes/serial/Stop (1.47s)
=== RUN   TestNoKubernetes/serial/Stop
no_kubernetes_test.go:183: (dbg) Run:  out/minikube-linux-amd64 stop -p NoKubernetes-106041
no_kubernetes_test.go:183: (dbg) Done: out/minikube-linux-amd64 stop -p NoKubernetes-106041: (1.473496164s)
--- PASS: TestNoKubernetes/serial/Stop (1.47s)

TestPause/serial/PauseAgain (0.87s)
=== RUN   TestPause/serial/PauseAgain
pause_test.go:110: (dbg) Run:  out/minikube-linux-amd64 pause -p pause-005751 --alsologtostderr -v=5
--- PASS: TestPause/serial/PauseAgain (0.87s)

TestPause/serial/DeletePaused (1.04s)
=== RUN   TestPause/serial/DeletePaused
pause_test.go:132: (dbg) Run:  out/minikube-linux-amd64 delete -p pause-005751 --alsologtostderr -v=5
pause_test.go:132: (dbg) Done: out/minikube-linux-amd64 delete -p pause-005751 --alsologtostderr -v=5: (1.043137179s)
--- PASS: TestPause/serial/DeletePaused (1.04s)
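
Taken together, the pause sub-tests above walk the whole lifecycle; the same sequence with the bare CLI, assuming a running profile:

    minikube pause -p pause-005751 --alsologtostderr -v=5     # pause the control-plane containers
    minikube unpause -p pause-005751 --alsologtostderr -v=5   # resume them
    minikube pause -p pause-005751 --alsologtostderr -v=5     # pause again after the unpause
    minikube delete -p pause-005751 --alsologtostderr -v=5    # a paused profile can be deleted directly
    minikube profile list --output json                       # confirm the profile is gone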

TestNoKubernetes/serial/StartNoArgs (28.1s)
=== RUN   TestNoKubernetes/serial/StartNoArgs
no_kubernetes_test.go:216: (dbg) Run:  out/minikube-linux-amd64 start -p NoKubernetes-106041 --driver=kvm2 
no_kubernetes_test.go:216: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-106041 --driver=kvm2 : (28.099263481s)
--- PASS: TestNoKubernetes/serial/StartNoArgs (28.10s)

TestPause/serial/VerifyDeletedResources (0.48s)
=== RUN   TestPause/serial/VerifyDeletedResources
pause_test.go:142: (dbg) Run:  out/minikube-linux-amd64 profile list --output json
--- PASS: TestPause/serial/VerifyDeletedResources (0.48s)

TestNetworkPlugins/group/calico/Start (114.29s)
=== RUN   TestNetworkPlugins/group/calico/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p calico-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=kvm2 
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p calico-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=kvm2 : (1m54.285951683s)
--- PASS: TestNetworkPlugins/group/calico/Start (114.29s)

TestNetworkPlugins/group/custom-flannel/Start (99.07s)
=== RUN   TestNetworkPlugins/group/custom-flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p custom-flannel-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=kvm2 
E1123 08:50:27.034890   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.041371   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.052871   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.074406   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.115894   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.197369   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.358948   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:27.681205   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:28.323251   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:29.605485   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:32.166975   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p custom-flannel-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=kvm2 : (1m39.065678037s)
--- PASS: TestNetworkPlugins/group/custom-flannel/Start (99.07s)

TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.18s)
=== RUN   TestNoKubernetes/serial/VerifyK8sNotRunningSecond
no_kubernetes_test.go:172: (dbg) Run:  out/minikube-linux-amd64 ssh -p NoKubernetes-106041 "sudo systemctl is-active --quiet service kubelet"
no_kubernetes_test.go:172: (dbg) Non-zero exit: out/minikube-linux-amd64 ssh -p NoKubernetes-106041 "sudo systemctl is-active --quiet service kubelet": exit status 1 (175.257104ms)
** stderr ** 
	ssh: Process exited with status 4
** /stderr **
--- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.18s)

TestNetworkPlugins/group/false/Start (132.12s)
=== RUN   TestNetworkPlugins/group/false/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p false-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=kvm2 
E1123 08:50:37.289251   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:47.531472   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:50:59.015733   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p false-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=kvm2 : (2m12.118349394s)
--- PASS: TestNetworkPlugins/group/false/Start (132.12s)

TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)
=== RUN   TestNetworkPlugins/group/kindnet/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ...
helpers_test.go:352: "kindnet-v8zsx" [deca1b7f-8a35-45f8-8119-1540dee3353a] Running
E1123 08:51:08.013795   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 6.004687336s
--- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (6.01s)

TestNetworkPlugins/group/kindnet/KubeletFlags (0.21s)
=== RUN   TestNetworkPlugins/group/kindnet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p kindnet-564164 "pgrep -a kubelet"
I1123 08:51:09.183104   22148 config.go:182] Loaded profile config "kindnet-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.21s)

TestNetworkPlugins/group/kindnet/NetCatPod (12.26s)
=== RUN   TestNetworkPlugins/group/kindnet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kindnet-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-qnmjf" [ecedd1f5-f804-46fc-b6b0-a2a61595a0f1] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-qnmjf" [ecedd1f5-f804-46fc-b6b0-a2a61595a0f1] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 12.004431646s
--- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (12.26s)

TestNetworkPlugins/group/kindnet/DNS (0.17s)
=== RUN   TestNetworkPlugins/group/kindnet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kindnet-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kindnet/DNS (0.17s)

TestNetworkPlugins/group/kindnet/Localhost (0.2s)
=== RUN   TestNetworkPlugins/group/kindnet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kindnet-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.20s)

TestNetworkPlugins/group/kindnet/HairPin (0.19s)
=== RUN   TestNetworkPlugins/group/kindnet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kindnet-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.19s)

TestNetworkPlugins/group/enable-default-cni/Start (99.73s)
=== RUN   TestNetworkPlugins/group/enable-default-cni/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p enable-default-cni-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=kvm2 
E1123 08:51:48.975613   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:51:58.728788   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p enable-default-cni-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=kvm2 : (1m39.734560032s)
--- PASS: TestNetworkPlugins/group/enable-default-cni/Start (99.73s)

TestNetworkPlugins/group/calico/ControllerPod (6.01s)
=== RUN   TestNetworkPlugins/group/calico/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ...
helpers_test.go:352: "calico-node-88k7q" [e90c7708-784c-4f7b-ad00-430225b71eda] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node])
helpers_test.go:352: "calico-node-88k7q" [e90c7708-784c-4f7b-ad00-430225b71eda] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 6.00575791s
--- PASS: TestNetworkPlugins/group/calico/ControllerPod (6.01s)

TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.2s)
=== RUN   TestNetworkPlugins/group/custom-flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p custom-flannel-564164 "pgrep -a kubelet"
I1123 08:52:02.700609   22148 config.go:182] Loaded profile config "custom-flannel-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.20s)

TestNetworkPlugins/group/custom-flannel/NetCatPod (12.27s)
=== RUN   TestNetworkPlugins/group/custom-flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context custom-flannel-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-sdfv9" [174a32fb-3c00-405b-ac48-79cdcf56dc4a] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-sdfv9" [174a32fb-3c00-405b-ac48-79cdcf56dc4a] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 12.005257476s
--- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (12.27s)

TestNetworkPlugins/group/calico/KubeletFlags (0.22s)
=== RUN   TestNetworkPlugins/group/calico/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p calico-564164 "pgrep -a kubelet"
I1123 08:52:06.165781   22148 config.go:182] Loaded profile config "calico-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.22s)

TestNetworkPlugins/group/calico/NetCatPod (20.33s)
=== RUN   TestNetworkPlugins/group/calico/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context calico-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-jm2tp" [bf466bd0-9136-443d-a737-963ae1267b88] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-jm2tp" [bf466bd0-9136-443d-a737-963ae1267b88] Running
E1123 08:52:26.432678   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 20.007043118s
--- PASS: TestNetworkPlugins/group/calico/NetCatPod (20.33s)

TestNetworkPlugins/group/custom-flannel/DNS (0.25s)
=== RUN   TestNetworkPlugins/group/custom-flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context custom-flannel-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.25s)

TestNetworkPlugins/group/custom-flannel/Localhost (0.19s)
=== RUN   TestNetworkPlugins/group/custom-flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context custom-flannel-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.19s)

TestNetworkPlugins/group/custom-flannel/HairPin (0.2s)
=== RUN   TestNetworkPlugins/group/custom-flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context custom-flannel-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.20s)

TestNetworkPlugins/group/calico/DNS (0.23s)
=== RUN   TestNetworkPlugins/group/calico/DNS
net_test.go:175: (dbg) Run:  kubectl --context calico-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/calico/DNS (0.23s)

TestNetworkPlugins/group/calico/Localhost (0.18s)
=== RUN   TestNetworkPlugins/group/calico/Localhost
net_test.go:194: (dbg) Run:  kubectl --context calico-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/calico/Localhost (0.18s)

TestNetworkPlugins/group/calico/HairPin (0.18s)
=== RUN   TestNetworkPlugins/group/calico/HairPin
net_test.go:264: (dbg) Run:  kubectl --context calico-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/calico/HairPin (0.18s)

TestNetworkPlugins/group/flannel/Start (69.54s)
=== RUN   TestNetworkPlugins/group/flannel/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p flannel-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=kvm2 
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p flannel-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=kvm2 : (1m9.534137035s)
--- PASS: TestNetworkPlugins/group/flannel/Start (69.54s)

TestNetworkPlugins/group/bridge/Start (102.33s)
=== RUN   TestNetworkPlugins/group/bridge/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p bridge-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=kvm2 
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p bridge-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=kvm2 : (1m42.333390375s)
--- PASS: TestNetworkPlugins/group/bridge/Start (102.33s)

TestNetworkPlugins/group/false/KubeletFlags (0.21s)
=== RUN   TestNetworkPlugins/group/false/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p false-564164 "pgrep -a kubelet"
I1123 08:52:45.697559   22148 config.go:182] Loaded profile config "false-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/false/KubeletFlags (0.21s)

TestNetworkPlugins/group/false/NetCatPod (12.25s)
=== RUN   TestNetworkPlugins/group/false/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context false-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-6mx4c" [bbb88074-321a-4a4c-8504-334e8c8d146c] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-6mx4c" [bbb88074-321a-4a4c-8504-334e8c8d146c] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: app=netcat healthy within 12.005084384s
--- PASS: TestNetworkPlugins/group/false/NetCatPod (12.25s)

TestNetworkPlugins/group/false/DNS (0.23s)
=== RUN   TestNetworkPlugins/group/false/DNS
net_test.go:175: (dbg) Run:  kubectl --context false-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/false/DNS (0.23s)

TestNetworkPlugins/group/false/Localhost (0.17s)
=== RUN   TestNetworkPlugins/group/false/Localhost
net_test.go:194: (dbg) Run:  kubectl --context false-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/false/Localhost (0.17s)

TestNetworkPlugins/group/false/HairPin (0.18s)
=== RUN   TestNetworkPlugins/group/false/HairPin
net_test.go:264: (dbg) Run:  kubectl --context false-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/false/HairPin (0.18s)

TestNetworkPlugins/group/kubenet/Start (95.32s)
=== RUN   TestNetworkPlugins/group/kubenet/Start
net_test.go:112: (dbg) Run:  out/minikube-linux-amd64 start -p kubenet-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=kvm2 
net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p kubenet-564164 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=kvm2 : (1m35.321614866s)
--- PASS: TestNetworkPlugins/group/kubenet/Start (95.32s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.21s)
=== RUN   TestNetworkPlugins/group/enable-default-cni/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p enable-default-cni-564164 "pgrep -a kubelet"
I1123 08:53:18.392584   22148 config.go:182] Loaded profile config "enable-default-cni-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.21s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/NetCatPod (14.26s)
=== RUN   TestNetworkPlugins/group/enable-default-cni/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context enable-default-cni-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-x4mrx" [756ba9ac-db61-45e4-af13-e267848a6a1e] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-x4mrx" [756ba9ac-db61-45e4-af13-e267848a6a1e] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 14.005352112s
--- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (14.26s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/DNS (0.27s)
=== RUN   TestNetworkPlugins/group/enable-default-cni/DNS
net_test.go:175: (dbg) Run:  kubectl --context enable-default-cni-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.27s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/Localhost (0.15s)
=== RUN   TestNetworkPlugins/group/enable-default-cni/Localhost
net_test.go:194: (dbg) Run:  kubectl --context enable-default-cni-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.15s)

                                                
                                    
TestNetworkPlugins/group/enable-default-cni/HairPin (0.15s)
=== RUN   TestNetworkPlugins/group/enable-default-cni/HairPin
net_test.go:264: (dbg) Run:  kubectl --context enable-default-cni-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.15s)

                                                
                                    
TestNetworkPlugins/group/flannel/ControllerPod (6.01s)
=== RUN   TestNetworkPlugins/group/flannel/ControllerPod
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ...
helpers_test.go:352: "kube-flannel-ds-pnb47" [78a85546-c24e-4a8f-b413-5d006bd66e53] Running
net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 6.005730333s
--- PASS: TestNetworkPlugins/group/flannel/ControllerPod (6.01s)

                                                
                                    
TestNetworkPlugins/group/flannel/KubeletFlags (0.23s)
=== RUN   TestNetworkPlugins/group/flannel/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p flannel-564164 "pgrep -a kubelet"
I1123 08:53:47.383175   22148 config.go:182] Loaded profile config "flannel-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.23s)

                                                
                                    
TestNetworkPlugins/group/flannel/NetCatPod (13.56s)
=== RUN   TestNetworkPlugins/group/flannel/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context flannel-564164 replace --force -f testdata/netcat-deployment.yaml
I1123 08:53:47.866765   22148 kapi.go:136] Waiting for deployment netcat to stabilize, generation 1 observed generation 1 spec.replicas 1 status.replicas 0
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-95b4t" [e363949e-db9f-4493-8685-0e3eb84b94f6] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-95b4t" [e363949e-db9f-4493-8685-0e3eb84b94f6] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 13.007942156s
--- PASS: TestNetworkPlugins/group/flannel/NetCatPod (13.56s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/FirstStart (102.02s)
=== RUN   TestStartStop/group/old-k8s-version/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-amd64 start -p old-k8s-version-896471 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=kvm2  --kubernetes-version=v1.28.0
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p old-k8s-version-896471 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=kvm2  --kubernetes-version=v1.28.0: (1m42.023592461s)
--- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (102.02s)

                                                
                                    
TestNetworkPlugins/group/flannel/DNS (0.19s)
=== RUN   TestNetworkPlugins/group/flannel/DNS
net_test.go:175: (dbg) Run:  kubectl --context flannel-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/flannel/DNS (0.19s)

                                                
                                    
TestNetworkPlugins/group/flannel/Localhost (0.2s)
=== RUN   TestNetworkPlugins/group/flannel/Localhost
net_test.go:194: (dbg) Run:  kubectl --context flannel-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/flannel/Localhost (0.20s)

                                                
                                    
TestNetworkPlugins/group/flannel/HairPin (0.16s)
=== RUN   TestNetworkPlugins/group/flannel/HairPin
net_test.go:264: (dbg) Run:  kubectl --context flannel-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/flannel/HairPin (0.16s)

                                                
                                    
TestStartStop/group/no-preload/serial/FirstStart (107.71s)
=== RUN   TestStartStop/group/no-preload/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-amd64 start -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.34.1
E1123 08:54:21.315801   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.323078   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.336254   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.357796   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.399365   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.481561   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.643939   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:21.965687   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:22.607660   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:54:23.889022   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.34.1: (1m47.714323207s)
--- PASS: TestStartStop/group/no-preload/serial/FirstStart (107.71s)

                                                
                                    
TestNetworkPlugins/group/bridge/KubeletFlags (0.2s)
=== RUN   TestNetworkPlugins/group/bridge/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p bridge-564164 "pgrep -a kubelet"
I1123 08:54:26.386117   22148 config.go:182] Loaded profile config "bridge-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.20s)

                                                
                                    
TestNetworkPlugins/group/bridge/NetCatPod (12.31s)
=== RUN   TestNetworkPlugins/group/bridge/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context bridge-564164 replace --force -f testdata/netcat-deployment.yaml
E1123 08:54:26.450689   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-ltpfk" [f47316ef-3c35-4439-838a-ae376eee0819] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
E1123 08:54:31.572127   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "netcat-cd4db9dbf-ltpfk" [f47316ef-3c35-4439-838a-ae376eee0819] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 12.003579957s
--- PASS: TestNetworkPlugins/group/bridge/NetCatPod (12.31s)

                                                
                                    
TestNetworkPlugins/group/bridge/DNS (0.19s)
=== RUN   TestNetworkPlugins/group/bridge/DNS
net_test.go:175: (dbg) Run:  kubectl --context bridge-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/bridge/DNS (0.19s)

                                                
                                    
TestNetworkPlugins/group/bridge/Localhost (0.16s)
=== RUN   TestNetworkPlugins/group/bridge/Localhost
net_test.go:194: (dbg) Run:  kubectl --context bridge-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/bridge/Localhost (0.16s)

                                                
                                    
TestNetworkPlugins/group/bridge/HairPin (0.16s)
=== RUN   TestNetworkPlugins/group/bridge/HairPin
net_test.go:264: (dbg) Run:  kubectl --context bridge-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/bridge/HairPin (0.16s)

                                                
                                    
TestNetworkPlugins/group/kubenet/KubeletFlags (0.22s)
=== RUN   TestNetworkPlugins/group/kubenet/KubeletFlags
net_test.go:133: (dbg) Run:  out/minikube-linux-amd64 ssh -p kubenet-564164 "pgrep -a kubelet"
I1123 08:54:49.383603   22148 config.go:182] Loaded profile config "kubenet-564164": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.34.1
--- PASS: TestNetworkPlugins/group/kubenet/KubeletFlags (0.22s)

                                                
                                    
TestNetworkPlugins/group/kubenet/NetCatPod (11.31s)
=== RUN   TestNetworkPlugins/group/kubenet/NetCatPod
net_test.go:149: (dbg) Run:  kubectl --context kubenet-564164 replace --force -f testdata/netcat-deployment.yaml
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ...
helpers_test.go:352: "netcat-cd4db9dbf-p9nzz" [f42deafc-cc00-43d8-8209-0269e0fb0882] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils])
helpers_test.go:352: "netcat-cd4db9dbf-p9nzz" [f42deafc-cc00-43d8-8209-0269e0fb0882] Running
net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: app=netcat healthy within 11.005353639s
--- PASS: TestNetworkPlugins/group/kubenet/NetCatPod (11.31s)

                                                
                                    
TestStartStop/group/embed-certs/serial/FirstStart (97s)
=== RUN   TestStartStop/group/embed-certs/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-amd64 start -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2  --kubernetes-version=v1.34.1
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2  --kubernetes-version=v1.34.1: (1m37.001062614s)
--- PASS: TestStartStop/group/embed-certs/serial/FirstStart (97.00s)

                                                
                                    
TestNetworkPlugins/group/kubenet/DNS (0.18s)
=== RUN   TestNetworkPlugins/group/kubenet/DNS
net_test.go:175: (dbg) Run:  kubectl --context kubenet-564164 exec deployment/netcat -- nslookup kubernetes.default
--- PASS: TestNetworkPlugins/group/kubenet/DNS (0.18s)

                                                
                                    
TestNetworkPlugins/group/kubenet/Localhost (0.14s)
=== RUN   TestNetworkPlugins/group/kubenet/Localhost
net_test.go:194: (dbg) Run:  kubectl --context kubenet-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080"
--- PASS: TestNetworkPlugins/group/kubenet/Localhost (0.14s)

                                                
                                    
TestNetworkPlugins/group/kubenet/HairPin (0.18s)
=== RUN   TestNetworkPlugins/group/kubenet/HairPin
net_test.go:264: (dbg) Run:  kubectl --context kubenet-564164 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080"
--- PASS: TestNetworkPlugins/group/kubenet/HairPin (0.18s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/FirstStart (93.56s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-amd64 start -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2  --kubernetes-version=v1.34.1
E1123 08:55:27.035399   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2  --kubernetes-version=v1.34.1: (1m33.55807392s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (93.56s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/DeployApp (10.41s)
=== RUN   TestStartStop/group/old-k8s-version/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context old-k8s-version-896471 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [eca45774-fb95-40db-897f-f034182b36f2] Pending
helpers_test.go:352: "busybox" [eca45774-fb95-40db-897f-f034182b36f2] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [eca45774-fb95-40db-897f-f034182b36f2] Running
E1123 08:55:43.258058   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 10.003888528s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context old-k8s-version-896471 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (10.41s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.31s)
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p old-k8s-version-896471 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-amd64 addons enable metrics-server -p old-k8s-version-896471 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.218575308s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context old-k8s-version-896471 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (1.31s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/Stop (14.33s)
=== RUN   TestStartStop/group/old-k8s-version/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-amd64 stop -p old-k8s-version-896471 --alsologtostderr -v=3
E1123 08:55:54.738683   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/gvisor-350074/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:55:59.016027   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/functional-086932/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p old-k8s-version-896471 --alsologtostderr -v=3: (14.326038317s)
--- PASS: TestStartStop/group/old-k8s-version/serial/Stop (14.33s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.16s)
=== RUN   TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-896471 -n old-k8s-version-896471
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-896471 -n old-k8s-version-896471: exit status 7 (66.392851ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p old-k8s-version-896471 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.16s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/SecondStart (45.54s)
=== RUN   TestStartStop/group/old-k8s-version/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-amd64 start -p old-k8s-version-896471 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=kvm2  --kubernetes-version=v1.28.0
E1123 08:56:02.972141   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:02.978581   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:02.990014   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:03.011500   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:03.052980   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:03.134482   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:03.296019   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:03.617616   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:04.259047   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p old-k8s-version-896471 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=kvm2  --kubernetes-version=v1.28.0: (45.221007679s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-896471 -n old-k8s-version-896471
--- PASS: TestStartStop/group/old-k8s-version/serial/SecondStart (45.54s)

                                                
                                    
TestStartStop/group/no-preload/serial/DeployApp (9.38s)
=== RUN   TestStartStop/group/no-preload/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context no-preload-019660 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [8eff63a6-482a-4d62-bf93-04f9d9d43064] Pending
E1123 08:56:05.540941   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [8eff63a6-482a-4d62-bf93-04f9d9d43064] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
E1123 08:56:08.102875   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
helpers_test.go:352: "busybox" [8eff63a6-482a-4d62-bf93-04f9d9d43064] Running
E1123 08:56:13.224904   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:194: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 9.006527502s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context no-preload-019660 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/no-preload/serial/DeployApp (9.38s)

                                                
                                    
TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.18s)
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p no-preload-019660 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-amd64 addons enable metrics-server -p no-preload-019660 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.068888034s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context no-preload-019660 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (1.18s)

                                                
                                    
TestStartStop/group/no-preload/serial/Stop (13.58s)
=== RUN   TestStartStop/group/no-preload/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-amd64 stop -p no-preload-019660 --alsologtostderr -v=3
E1123 08:56:23.466249   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p no-preload-019660 --alsologtostderr -v=3: (13.58377214s)
--- PASS: TestStartStop/group/no-preload/serial/Stop (13.58s)

                                                
                                    
TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.17s)
=== RUN   TestStartStop/group/no-preload/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660: exit status 7 (79.327647ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p no-preload-019660 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.17s)

                                                
                                    
TestStartStop/group/no-preload/serial/SecondStart (52.39s)
=== RUN   TestStartStop/group/no-preload/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-amd64 start -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.34.1
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p no-preload-019660 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=kvm2  --kubernetes-version=v1.34.1: (52.117416256s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-019660 -n no-preload-019660
--- PASS: TestStartStop/group/no-preload/serial/SecondStart (52.39s)

                                                
                                    
TestStartStop/group/embed-certs/serial/DeployApp (11.34s)
=== RUN   TestStartStop/group/embed-certs/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context embed-certs-059363 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [aff505c3-299b-4ca2-8e11-2d2283ee8a6d] Pending
helpers_test.go:352: "busybox" [aff505c3-299b-4ca2-8e11-2d2283ee8a6d] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [aff505c3-299b-4ca2-8e11-2d2283ee8a6d] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 11.004366703s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context embed-certs-059363 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/embed-certs/serial/DeployApp (11.34s)

                                                
                                    
TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.16s)
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p embed-certs-059363 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
E1123 08:56:43.948577   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-amd64 addons enable metrics-server -p embed-certs-059363 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.062400879s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context embed-certs-059363 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (1.16s)

                                                
                                    
TestStartStop/group/embed-certs/serial/Stop (13.94s)
=== RUN   TestStartStop/group/embed-certs/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-amd64 stop -p embed-certs-059363 --alsologtostderr -v=3
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p embed-certs-059363 --alsologtostderr -v=3: (13.944541204s)
--- PASS: TestStartStop/group/embed-certs/serial/Stop (13.94s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (14.01s)
=== RUN   TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-8694d4445c-48sk4" [1a914a37-f302-4b48-a5ca-356475d89886] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:352: "kubernetes-dashboard-8694d4445c-48sk4" [1a914a37-f302-4b48-a5ca-356475d89886] Running
start_stop_delete_test.go:272: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 14.005837s
--- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (14.01s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/DeployApp (11.35s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/DeployApp
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context default-k8s-diff-port-925051 create -f testdata/busybox.yaml
start_stop_delete_test.go:194: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
helpers_test.go:352: "busybox" [cf62194c-d450-4af3-8c3c-8c0362ecbf10] Pending
helpers_test.go:352: "busybox" [cf62194c-d450-4af3-8c3c-8c0362ecbf10] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
helpers_test.go:352: "busybox" [cf62194c-d450-4af3-8c3c-8c0362ecbf10] Running
start_stop_delete_test.go:194: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 11.004743306s
start_stop_delete_test.go:194: (dbg) Run:  kubectl --context default-k8s-diff-port-925051 exec busybox -- /bin/sh -c "ulimit -n"
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (11.35s)

                                                
                                    
TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.17s)
=== RUN   TestStartStop/group/embed-certs/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-059363 -n embed-certs-059363
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-059363 -n embed-certs-059363: exit status 7 (76.50358ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p embed-certs-059363 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.17s)

                                                
                                    
TestStartStop/group/embed-certs/serial/SecondStart (52.64s)
=== RUN   TestStartStop/group/embed-certs/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-amd64 start -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2  --kubernetes-version=v1.34.1
E1123 08:56:58.729895   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/skaffold-153557/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p embed-certs-059363 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=kvm2  --kubernetes-version=v1.34.1: (52.361537284s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-059363 -n embed-certs-059363
E1123 08:57:51.053591   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
--- PASS: TestStartStop/group/embed-certs/serial/SecondStart (52.64s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.09s)
=== RUN   TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-8694d4445c-48sk4" [1a914a37-f302-4b48-a5ca-356475d89886] Running
E1123 08:56:59.945311   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:59.951716   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:59.963128   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:56:59.984567   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:00.026037   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:00.107527   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:00.269553   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:00.591301   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:01.233465   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:285: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.005377447s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context old-k8s-version-896471 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.09s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.29s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p default-k8s-diff-port-925051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
E1123 08:57:02.514804   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-amd64 addons enable metrics-server -p default-k8s-diff-port-925051 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.170303503s)
start_stop_delete_test.go:213: (dbg) Run:  kubectl --context default-k8s-diff-port-925051 describe deploy/metrics-server -n kube-system
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (1.29s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/Stop (13.7s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-amd64 stop -p default-k8s-diff-port-925051 --alsologtostderr -v=3
E1123 08:57:02.954630   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:02.961113   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:02.972958   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:02.995204   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:03.036570   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:03.118354   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:03.280574   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:03.602877   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:04.245312   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p default-k8s-diff-port-925051 --alsologtostderr -v=3: (13.698454522s)
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (13.70s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.24s)
=== RUN   TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-amd64 -p old-k8s-version-896471 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/gvisor-addon:2
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.24s)

                                                
                                    
TestStartStop/group/old-k8s-version/serial/Pause (2.99s)
=== RUN   TestStartStop/group/old-k8s-version/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 pause -p old-k8s-version-896471 --alsologtostderr -v=1
E1123 08:57:05.076580   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:05.180492   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/auto-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:05.526978   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-896471 -n old-k8s-version-896471
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-896471 -n old-k8s-version-896471: exit status 2 (255.642828ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-896471 -n old-k8s-version-896471
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-896471 -n old-k8s-version-896471: exit status 2 (250.832747ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 unpause -p old-k8s-version-896471 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-896471 -n old-k8s-version-896471
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-896471 -n old-k8s-version-896471
--- PASS: TestStartStop/group/old-k8s-version/serial/Pause (2.99s)

                                                
                                    
TestStartStop/group/newest-cni/serial/FirstStart (67.45s)
=== RUN   TestStartStop/group/newest-cni/serial/FirstStart
start_stop_delete_test.go:184: (dbg) Run:  out/minikube-linux-amd64 start -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2  --kubernetes-version=v1.34.1
E1123 08:57:10.199269   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:13.210126   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2  --kubernetes-version=v1.34.1: (1m7.453871156s)
--- PASS: TestStartStop/group/newest-cni/serial/FirstStart (67.45s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.17s)
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051: exit status 7 (63.543991ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p default-k8s-diff-port-925051 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.17s)
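EnableAddonAfterStop first confirms the host is down ({{.Host}} prints Stopped and exits 7, recorded as "may be ok"), then enables the dashboard addon against the stopped profile with a pinned MetricsScraper image. A rough equivalent of those two calls using the binary and flags shown above; the profile name is copied from this block purely as an illustration:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	bin := "out/minikube-linux-amd64"
	profile := "default-k8s-diff-port-925051" // profile name copied from the block above

	// Host-state check: on a stopped profile this prints "Stopped" and exits with a
	// non-zero code (7 in the run above), which the test tolerates.
	out, err := exec.Command(bin, "status", "--format={{.Host}}", "-p", profile, "-n", profile).CombinedOutput()
	fmt.Printf("host state: %s(err=%v)\n", out, err)

	// Enable the dashboard addon while the cluster is stopped, pinning the
	// MetricsScraper image exactly as the test command line does.
	enable := exec.Command(bin, "addons", "enable", "dashboard", "-p", profile,
		"--images=MetricsScraper=registry.k8s.io/echoserver:1.4")
	if out, err := enable.CombinedOutput(); err != nil {
		fmt.Printf("addons enable failed: %v\n%s", err, out)
	}
}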

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/SecondStart (75.66s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-amd64 start -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2  --kubernetes-version=v1.34.1
E1123 08:57:20.441611   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p default-k8s-diff-port-925051 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=kvm2  --kubernetes-version=v1.34.1: (1m15.362829466s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (75.66s)

                                                
                                    
TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.01s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-zh9mv" [f95d3a53-bb86-4359-9b8f-82b6690f7a59] Running
E1123 08:57:23.451841   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:57:24.910393   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:272: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.00503897s
--- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.01s)
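UserAppExistsAfterStop verifies that the dashboard pod deployed before the stop becomes healthy again after the restart, by watching pods labelled k8s-app=kubernetes-dashboard for up to 9 minutes. A simplified polling loop in the same spirit, shelling out to kubectl against the profile's kube-context; waitForRunning is an illustrative helper, not the helpers_test.go implementation (which also checks readiness):

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForRunning polls the pods selected by the label until every reported phase
// is Running, or the timeout expires.
func waitForRunning(kubeContext, namespace, selector string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := exec.Command("kubectl", "--context", kubeContext,
			"get", "pods", "-n", namespace, "-l", selector,
			"-o", "jsonpath={.items[*].status.phase}").Output()
		if err == nil {
			phases := strings.Fields(string(out))
			allRunning := len(phases) > 0
			for _, p := range phases {
				if p != "Running" {
					allRunning = false
				}
			}
			if allRunning {
				return nil
			}
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("pods %q in %q not Running within %s", selector, namespace, timeout)
}

func main() {
	// Context, namespace, label and the 9m budget all come from the log above.
	err := waitForRunning("no-preload-019660", "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", 9*time.Minute)
	fmt.Println("wait result:", err)
}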

                                                
                                    
TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.1s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-zh9mv" [f95d3a53-bb86-4359-9b8f-82b6690f7a59] Running
start_stop_delete_test.go:285: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.006962551s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context no-preload-019660 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.10s)

                                                
                                    
TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.21s)

                                                
                                                
=== RUN   TestStartStop/group/no-preload/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-amd64 -p no-preload-019660 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/gvisor-addon:2
--- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.21s)
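VerifyKubernetesImages dumps the images present in the node with `image list --format=json` and reports any that fall outside the expected core set, such as the gcr.io/k8s-minikube test images above. A rough sketch of that check, assuming the same binary; it scans the raw JSON text for image references rather than decoding the exact schema the test uses:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	bin := "out/minikube-linux-amd64"
	profile := "no-preload-019660" // profile name copied from the block above

	// Dump the node's image list as JSON; the test decodes it properly, this sketch
	// just scans the quoted strings for references outside registry.k8s.io.
	out, err := exec.Command(bin, "-p", profile, "image", "list", "--format=json").Output()
	if err != nil {
		fmt.Println("image list failed:", err)
		return
	}
	for _, tok := range strings.Split(string(out), "\"") {
		if strings.Contains(tok, "/") && !strings.HasPrefix(tok, "registry.k8s.io/") {
			fmt.Println("found non-default image:", tok)
		}
	}
}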

                                                
                                    
TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (9.01s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-rnv66" [4a59730a-9857-4382-bfdb-751bf357a991] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-rnv66" [4a59730a-9857-4382-bfdb-751bf357a991] Running
E1123 08:57:56.175509   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:272: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 9.004796697s
--- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (9.01s)

                                                
                                    
TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.09s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-rnv66" [4a59730a-9857-4382-bfdb-751bf357a991] Running
start_stop_delete_test.go:285: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.006855093s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context embed-certs-059363 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.09s)

                                                
                                    
TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.28s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-amd64 -p embed-certs-059363 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/gvisor-addon:2
--- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.28s)

                                                
                                    
TestStartStop/group/embed-certs/serial/Pause (3.36s)

                                                
                                                
=== RUN   TestStartStop/group/embed-certs/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 pause -p embed-certs-059363 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-059363 -n embed-certs-059363
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-059363 -n embed-certs-059363: exit status 2 (283.748947ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-059363 -n embed-certs-059363
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-059363 -n embed-certs-059363: exit status 2 (298.78402ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 unpause -p embed-certs-059363 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-059363 -n embed-certs-059363
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-059363 -n embed-certs-059363
--- PASS: TestStartStop/group/embed-certs/serial/Pause (3.36s)

                                                
                                    
TestStartStop/group/newest-cni/serial/DeployApp (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/DeployApp
--- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
start_stop_delete_test.go:203: (dbg) Run:  out/minikube-linux-amd64 addons enable metrics-server -p newest-cni-078196 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
start_stop_delete_test.go:203: (dbg) Done: out/minikube-linux-amd64 addons enable metrics-server -p newest-cni-078196 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain: (1.003082845s)
start_stop_delete_test.go:209: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (1.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/Stop (13.58s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Stop
start_stop_delete_test.go:226: (dbg) Run:  out/minikube-linux-amd64 stop -p newest-cni-078196 --alsologtostderr -v=3
E1123 08:58:18.631050   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:18.637591   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:18.649150   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:18.670684   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:18.712162   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:18.793912   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:18.955184   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:19.276991   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:19.918594   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:21.200080   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:21.885990   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/calico-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:23.761590   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:24.895535   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/custom-flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:26.899566   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/false-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:28.883906   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p newest-cni-078196 --alsologtostderr -v=3: (13.58418906s)
--- PASS: TestStartStop/group/newest-cni/serial/Stop (13.58s)

                                                
                                    
TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.16s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
start_stop_delete_test.go:237: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-078196 -n newest-cni-078196
start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-078196 -n newest-cni-078196: exit status 7 (71.354708ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
start_stop_delete_test.go:244: (dbg) Run:  out/minikube-linux-amd64 addons enable dashboard -p newest-cni-078196 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
--- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.16s)

                                                
                                    
TestStartStop/group/newest-cni/serial/SecondStart (41.7s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/SecondStart
start_stop_delete_test.go:254: (dbg) Run:  out/minikube-linux-amd64 start -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2  --kubernetes-version=v1.34.1
start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p newest-cni-078196 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=kvm2  --kubernetes-version=v1.34.1: (41.44091302s)
start_stop_delete_test.go:260: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-078196 -n newest-cni-078196
--- PASS: TestStartStop/group/newest-cni/serial/SecondStart (41.70s)
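SecondStart re-runs the same start command as FirstStart; --wait is limited to apiserver,system_pods,default_sa because, as the warnings in this group note, CNI mode needs extra setup before ordinary pods can schedule. A sketch of issuing that start and timing it, with the binary and flags copied from the log:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	bin := "out/minikube-linux-amd64"
	args := []string{
		"start", "-p", "newest-cni-078196",
		"--memory=3072", "--alsologtostderr",
		"--wait=apiserver,system_pods,default_sa", // pod-level waits are skipped in CNI mode
		"--network-plugin=cni",
		"--extra-config=kubeadm.pod-network-cidr=10.42.0.0/16",
		"--driver=kvm2",
		"--kubernetes-version=v1.34.1",
	}

	// Time the run; the "(dbg) Done" line above reports a comparable elapsed duration.
	begin := time.Now()
	out, err := exec.Command(bin, args...).CombinedOutput()
	fmt.Printf("start took %s, err=%v\n%s", time.Since(begin).Round(time.Millisecond), err, out)
}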

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (11s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
start_stop_delete_test.go:272: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-vvxdj" [48e61acb-ad88-4993-90a3-c52a9067b6d0] Pending / Ready:ContainersNotReady (containers with unready status: [kubernetes-dashboard]) / ContainersReady:ContainersNotReady (containers with unready status: [kubernetes-dashboard])
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-vvxdj" [48e61acb-ad88-4993-90a3-c52a9067b6d0] Running
E1123 08:58:39.125484   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/enable-default-cni-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.147170   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.153681   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.165162   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.186728   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.228273   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.309792   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.471163   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:41.793017   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:42.434446   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:272: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 11.003725828s
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (11.00s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.08s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop
start_stop_delete_test.go:285: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
helpers_test.go:352: "kubernetes-dashboard-855c9754f9-vvxdj" [48e61acb-ad88-4993-90a3-c52a9067b6d0] Running
E1123 08:58:43.716442   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:46.278147   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/flannel-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E1123 08:58:46.832675   22148 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21966-18241/.minikube/profiles/kindnet-564164/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
start_stop_delete_test.go:285: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.004889997s
start_stop_delete_test.go:289: (dbg) Run:  kubectl --context default-k8s-diff-port-925051 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.08s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.2s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-amd64 -p default-k8s-diff-port-925051 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/gvisor-addon:2
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.20s)

                                                
                                    
TestStartStop/group/default-k8s-diff-port/serial/Pause (2.63s)

                                                
                                                
=== RUN   TestStartStop/group/default-k8s-diff-port/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 pause -p default-k8s-diff-port-925051 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051: exit status 2 (247.109836ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051: exit status 2 (213.242005ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 unpause -p default-k8s-diff-port-925051 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-925051 -n default-k8s-diff-port-925051
--- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (2.63s)

                                                
                                    
TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
start_stop_delete_test.go:271: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
start_stop_delete_test.go:282: WARNING: cni mode requires additional setup before pods can schedule :(
--- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)

                                                
                                    
TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.24s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
start_stop_delete_test.go:302: (dbg) Run:  out/minikube-linux-amd64 -p newest-cni-078196 image list --format=json
start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/gvisor-addon:2
--- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.24s)

                                                
                                    
TestStartStop/group/newest-cni/serial/Pause (2.65s)

                                                
                                                
=== RUN   TestStartStop/group/newest-cni/serial/Pause
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 pause -p newest-cni-078196 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-078196 -n newest-cni-078196
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-078196 -n newest-cni-078196: exit status 2 (222.650357ms)

                                                
                                                
-- stdout --
	Paused

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-078196 -n newest-cni-078196
start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-078196 -n newest-cni-078196: exit status 2 (213.076739ms)

                                                
                                                
-- stdout --
	Stopped

                                                
                                                
-- /stdout --
start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 unpause -p newest-cni-078196 --alsologtostderr -v=1
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-078196 -n newest-cni-078196
start_stop_delete_test.go:309: (dbg) Run:  out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-078196 -n newest-cni-078196
--- PASS: TestStartStop/group/newest-cni/serial/Pause (2.65s)

                                                
                                    

Test skip (34/366)

Order  Skipped test  Duration (s)
5 TestDownloadOnly/v1.28.0/cached-images 0
6 TestDownloadOnly/v1.28.0/binaries 0
7 TestDownloadOnly/v1.28.0/kubectl 0
14 TestDownloadOnly/v1.34.1/cached-images 0
15 TestDownloadOnly/v1.34.1/binaries 0
16 TestDownloadOnly/v1.34.1/kubectl 0
20 TestDownloadOnlyKic 0
33 TestAddons/serial/GCPAuth/RealCredentials 0
40 TestAddons/parallel/Olm 0
47 TestAddons/parallel/AmdGpuDevicePlugin 0
54 TestDockerEnvContainerd 0
55 TestHyperKitDriverInstallOrUpdate 0
56 TestHyperkitDriverSkipUpgrade 0
108 TestFunctional/parallel/PodmanEnv 0
122 TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel 0.01
123 TestFunctional/parallel/TunnelCmd/serial/StartTunnel 0.01
124 TestFunctional/parallel/TunnelCmd/serial/WaitService 0.01
125 TestFunctional/parallel/TunnelCmd/serial/AccessDirect 0.01
126 TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig 0.01
127 TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil 0.01
128 TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS 0.01
129 TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel 0.01
157 TestFunctionalNewestKubernetes 0
187 TestImageBuild/serial/validateImageBuildWithBuildEnv 0
215 TestKicCustomNetwork 0
216 TestKicExistingNetwork 0
217 TestKicCustomSubnet 0
218 TestKicStaticIP 0
250 TestChangeNoneUser 0
253 TestScheduledStopWindows 0
257 TestInsufficientStorage 0
261 TestMissingContainerUpgrade 0
272 TestNetworkPlugins/group/cilium 4.28
294 TestStartStop/group/disable-driver-mounts 0.2
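The reasons listed under each skipped test below are ordinary Go test skips: each test inspects its environment (OS, driver, runtime) up front and calls t.Skip or t.Skipf, and that message is what the report records. A generic illustration of the pattern; the driverName variable and the Example test names are hypothetical, not minikube's actual skip helpers:

package example

import (
	"runtime"
	"testing"
)

// driverName stands in for however the suite determines the driver under test;
// in the real suite this comes from test flags, not a package variable.
var driverName = "kvm2"

func TestHyperKitDriverInstallOrUpdateExample(t *testing.T) {
	// Mirrors "Skip if not darwin." from driver_install_or_update_test.go.
	if runtime.GOOS != "darwin" {
		t.Skip("Skip if not darwin.")
	}
}

func TestKicCustomNetworkExample(t *testing.T) {
	// Mirrors "only runs with docker driver" from kic_custom_network_test.go.
	if driverName != "docker" {
		t.Skip("only runs with docker driver")
	}
}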
TestDownloadOnly/v1.28.0/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/cached-images
aaa_download_only_test.go:128: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.28.0/cached-images (0.00s)

                                                
                                    
TestDownloadOnly/v1.28.0/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/binaries
aaa_download_only_test.go:150: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.28.0/binaries (0.00s)

                                                
                                    
TestDownloadOnly/v1.28.0/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.28.0/kubectl
aaa_download_only_test.go:166: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.28.0/kubectl (0.00s)

                                                
                                    
TestDownloadOnly/v1.34.1/cached-images (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.1/cached-images
aaa_download_only_test.go:128: Preload exists, images won't be cached
--- SKIP: TestDownloadOnly/v1.34.1/cached-images (0.00s)

                                                
                                    
TestDownloadOnly/v1.34.1/binaries (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.1/binaries
aaa_download_only_test.go:150: Preload exists, binaries are present within.
--- SKIP: TestDownloadOnly/v1.34.1/binaries (0.00s)

                                                
                                    
TestDownloadOnly/v1.34.1/kubectl (0s)

                                                
                                                
=== RUN   TestDownloadOnly/v1.34.1/kubectl
aaa_download_only_test.go:166: Test for darwin and windows
--- SKIP: TestDownloadOnly/v1.34.1/kubectl (0.00s)

                                                
                                    
TestDownloadOnlyKic (0s)

                                                
                                                
=== RUN   TestDownloadOnlyKic
aaa_download_only_test.go:219: skipping, only for docker or podman driver
--- SKIP: TestDownloadOnlyKic (0.00s)

                                                
                                    
TestAddons/serial/GCPAuth/RealCredentials (0s)

                                                
                                                
=== RUN   TestAddons/serial/GCPAuth/RealCredentials
addons_test.go:759: This test requires a GCE instance (excluding Cloud Shell) with a container based driver
--- SKIP: TestAddons/serial/GCPAuth/RealCredentials (0.00s)

                                                
                                    
TestAddons/parallel/Olm (0s)

                                                
                                                
=== RUN   TestAddons/parallel/Olm
=== PAUSE TestAddons/parallel/Olm

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/Olm
addons_test.go:483: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
--- SKIP: TestAddons/parallel/Olm (0.00s)

                                                
                                    
TestAddons/parallel/AmdGpuDevicePlugin (0s)

                                                
                                                
=== RUN   TestAddons/parallel/AmdGpuDevicePlugin
=== PAUSE TestAddons/parallel/AmdGpuDevicePlugin

                                                
                                                

                                                
                                                
=== CONT  TestAddons/parallel/AmdGpuDevicePlugin
addons_test.go:1033: skip amd gpu test on all but docker driver and amd64 platform
--- SKIP: TestAddons/parallel/AmdGpuDevicePlugin (0.00s)

                                                
                                    
TestDockerEnvContainerd (0s)

                                                
                                                
=== RUN   TestDockerEnvContainerd
docker_test.go:170: running with docker false linux amd64
docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver
--- SKIP: TestDockerEnvContainerd (0.00s)

                                                
                                    
TestHyperKitDriverInstallOrUpdate (0s)

                                                
                                                
=== RUN   TestHyperKitDriverInstallOrUpdate
driver_install_or_update_test.go:37: Skip if not darwin.
--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)

                                                
                                    
TestHyperkitDriverSkipUpgrade (0s)

                                                
                                                
=== RUN   TestHyperkitDriverSkipUpgrade
driver_install_or_update_test.go:101: Skip if not darwin.
--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s)

                                                
                                    
TestFunctional/parallel/PodmanEnv (0s)

                                                
                                                
=== RUN   TestFunctional/parallel/PodmanEnv
=== PAUSE TestFunctional/parallel/PodmanEnv

                                                
                                                

                                                
                                                
=== CONT  TestFunctional/parallel/PodmanEnv
functional_test.go:565: only validate podman env with docker container runtime, currently testing docker
--- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/StartTunnel
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/WaitService (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/WaitService
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/WaitService (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessDirect
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.01s)

                                                
                                    
TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.01s)

                                                
                                                
=== RUN   TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel
functional_test_tunnel_test.go:90: password required to execute 'route', skipping testTunnel: exit status 1
--- SKIP: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.01s)

                                                
                                    
TestFunctionalNewestKubernetes (0s)

                                                
                                                
=== RUN   TestFunctionalNewestKubernetes
functional_test.go:82: 
--- SKIP: TestFunctionalNewestKubernetes (0.00s)

                                                
                                    
TestImageBuild/serial/validateImageBuildWithBuildEnv (0s)

                                                
                                                
=== RUN   TestImageBuild/serial/validateImageBuildWithBuildEnv
image_test.go:114: skipping due to https://github.com/kubernetes/minikube/issues/12431
--- SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s)

                                                
                                    
TestKicCustomNetwork (0s)

                                                
                                                
=== RUN   TestKicCustomNetwork
kic_custom_network_test.go:34: only runs with docker driver
--- SKIP: TestKicCustomNetwork (0.00s)

                                                
                                    
TestKicExistingNetwork (0s)

                                                
                                                
=== RUN   TestKicExistingNetwork
kic_custom_network_test.go:73: only runs with docker driver
--- SKIP: TestKicExistingNetwork (0.00s)

                                                
                                    
TestKicCustomSubnet (0s)

                                                
                                                
=== RUN   TestKicCustomSubnet
kic_custom_network_test.go:102: only runs with docker/podman driver
--- SKIP: TestKicCustomSubnet (0.00s)

                                                
                                    
TestKicStaticIP (0s)

                                                
                                                
=== RUN   TestKicStaticIP
kic_custom_network_test.go:123: only run with docker/podman driver
--- SKIP: TestKicStaticIP (0.00s)

                                                
                                    
TestChangeNoneUser (0s)

                                                
                                                
=== RUN   TestChangeNoneUser
none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
--- SKIP: TestChangeNoneUser (0.00s)

                                                
                                    
TestScheduledStopWindows (0s)

                                                
                                                
=== RUN   TestScheduledStopWindows
scheduled_stop_test.go:42: test only runs on windows
--- SKIP: TestScheduledStopWindows (0.00s)

                                                
                                    
TestInsufficientStorage (0s)

                                                
                                                
=== RUN   TestInsufficientStorage
status_test.go:38: only runs with docker driver
--- SKIP: TestInsufficientStorage (0.00s)

                                                
                                    
TestMissingContainerUpgrade (0s)

                                                
                                                
=== RUN   TestMissingContainerUpgrade
version_upgrade_test.go:284: This test is only for Docker
--- SKIP: TestMissingContainerUpgrade (0.00s)

                                                
                                    
TestNetworkPlugins/group/cilium (4.28s)

                                                
                                                
=== RUN   TestNetworkPlugins/group/cilium
net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
panic.go:615: 
----------------------- debugLogs start: cilium-564164 [pass: true] --------------------------------
>>> netcat: nslookup kubernetes.default:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: nslookup debug kubernetes.default a-records:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: dig search kubernetes.default:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 udp/53:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: nc 10.96.0.10 tcp/53:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: /etc/nsswitch.conf:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: /etc/hosts:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> netcat: /etc/resolv.conf:
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> host: /etc/nsswitch.conf:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

                                                
                                                

                                                
                                                
>>> host: /etc/hosts:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

                                                
                                                

                                                
                                                
>>> host: /etc/resolv.conf:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

                                                
                                                

                                                
                                                
>>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
Error in configuration: context was not found for specified context: cilium-564164

                                                
                                                

                                                
                                                
>>> host: crictl pods:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

                                                
                                                

                                                
                                                
>>> host: crictl containers:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

                                                
                                                

                                                
                                                
>>> k8s: describe netcat deployment:
error: context "cilium-564164" does not exist

                                                
                                                

                                                
                                                
>>> k8s: describe netcat pod(s):
error: context "cilium-564164" does not exist

>>> k8s: netcat logs:
error: context "cilium-564164" does not exist

>>> k8s: describe coredns deployment:
error: context "cilium-564164" does not exist

>>> k8s: describe coredns pods:
error: context "cilium-564164" does not exist

>>> k8s: coredns logs:
error: context "cilium-564164" does not exist

>>> k8s: describe api server pod(s):
error: context "cilium-564164" does not exist

>>> k8s: api server logs:
error: context "cilium-564164" does not exist

>>> host: /etc/cni:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: ip a s:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: ip r s:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: iptables-save:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: iptables table nat:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> k8s: describe cilium daemon set:
Error in configuration: context was not found for specified context: cilium-564164

>>> k8s: describe cilium daemon set pod(s):
Error in configuration: context was not found for specified context: cilium-564164

>>> k8s: cilium daemon set container(s) logs (current):
error: context "cilium-564164" does not exist

>>> k8s: cilium daemon set container(s) logs (previous):
error: context "cilium-564164" does not exist

>>> k8s: describe cilium deployment:
Error in configuration: context was not found for specified context: cilium-564164

>>> k8s: describe cilium deployment pod(s):
Error in configuration: context was not found for specified context: cilium-564164

>>> k8s: cilium deployment container(s) logs (current):
error: context "cilium-564164" does not exist

>>> k8s: cilium deployment container(s) logs (previous):
error: context "cilium-564164" does not exist

>>> k8s: describe kube-proxy daemon set:
error: context "cilium-564164" does not exist

>>> k8s: describe kube-proxy pod(s):
error: context "cilium-564164" does not exist

>>> k8s: kube-proxy logs:
error: context "cilium-564164" does not exist

>>> host: kubelet daemon status:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: kubelet daemon config:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> k8s: kubelet logs:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /etc/kubernetes/kubelet.conf:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /var/lib/kubelet/config.yaml:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> k8s: kubectl config:
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
users: null

>>> k8s: cms:
Error in configuration: context was not found for specified context: cilium-564164

>>> host: docker daemon status:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: docker daemon config:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /etc/docker/daemon.json:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: docker system info:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: cri-docker daemon status:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: cri-docker daemon config:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /usr/lib/systemd/system/cri-docker.service:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: cri-dockerd version:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: containerd daemon status:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: containerd daemon config:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /lib/systemd/system/containerd.service:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /etc/containerd/config.toml:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: containerd config dump:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: crio daemon status:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: crio daemon config:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: /etc/crio:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

>>> host: crio config:
* Profile "cilium-564164" not found. Run "minikube profile list" to view all profiles.
To start a cluster, run: "minikube start -p cilium-564164"

----------------------- debugLogs end: cilium-564164 [took: 4.087161098s] --------------------------------
helpers_test.go:175: Cleaning up "cilium-564164" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p cilium-564164
--- SKIP: TestNetworkPlugins/group/cilium (4.28s)

TestStartStop/group/disable-driver-mounts (0.2s)

=== RUN   TestStartStop/group/disable-driver-mounts
=== PAUSE TestStartStop/group/disable-driver-mounts

=== CONT  TestStartStop/group/disable-driver-mounts
start_stop_delete_test.go:101: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
helpers_test.go:175: Cleaning up "disable-driver-mounts-807128" profile ...
helpers_test.go:178: (dbg) Run:  out/minikube-linux-amd64 delete -p disable-driver-mounts-807128
--- SKIP: TestStartStop/group/disable-driver-mounts (0.20s)