=== RUN TestMultiControlPlane/serial/RestartCluster
ha_test.go:562: (dbg) Run: out/minikube-linux-amd64 start -p ha-783738 --wait=true -v=7 --alsologtostderr --driver=kvm2
E0217 11:58:34.519547 84502 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/addons-603759/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:562: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p ha-783738 --wait=true -v=7 --alsologtostderr --driver=kvm2 : exit status 90 (1m50.94698712s)
-- stdout --
* [ha-783738] minikube v1.35.0 on Ubuntu 20.04 (kvm/amd64)
- MINIKUBE_LOCATION=20427
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/20427-77349/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/20427-77349/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the kvm2 driver based on existing profile
* Starting "ha-783738" primary control-plane node in "ha-783738" cluster
* Restarting existing kvm2 VM for "ha-783738" ...
* Preparing Kubernetes v1.32.1 on Docker 27.4.0 ...
* Enabled addons:
* Starting "ha-783738-m02" control-plane node in "ha-783738" cluster
* Restarting existing kvm2 VM for "ha-783738-m02" ...
* Found network options:
- NO_PROXY=192.168.39.249
-- /stdout --
** stderr **
I0217 11:56:50.215291 100380 out.go:345] Setting OutFile to fd 1 ...
I0217 11:56:50.215609 100380 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0217 11:56:50.215619 100380 out.go:358] Setting ErrFile to fd 2...
I0217 11:56:50.215624 100380 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0217 11:56:50.215819 100380 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20427-77349/.minikube/bin
I0217 11:56:50.216353 100380 out.go:352] Setting JSON to false
I0217 11:56:50.217237 100380 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":5958,"bootTime":1739787452,"procs":182,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1075-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0217 11:56:50.217362 100380 start.go:139] virtualization: kvm guest
I0217 11:56:50.219910 100380 out.go:177] * [ha-783738] minikube v1.35.0 on Ubuntu 20.04 (kvm/amd64)
I0217 11:56:50.221323 100380 out.go:177] - MINIKUBE_LOCATION=20427
I0217 11:56:50.221334 100380 notify.go:220] Checking for updates...
I0217 11:56:50.223835 100380 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0217 11:56:50.224954 100380 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:56:50.226180 100380 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20427-77349/.minikube
I0217 11:56:50.227361 100380 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0217 11:56:50.228473 100380 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0217 11:56:50.229885 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:56:50.230261 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:56:50.230308 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:56:50.245239 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:46091
I0217 11:56:50.245761 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:56:50.246359 100380 main.go:141] libmachine: Using API Version 1
I0217 11:56:50.246382 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:56:50.246775 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:56:50.246962 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:56:50.247230 100380 driver.go:394] Setting default libvirt URI to qemu:///system
I0217 11:56:50.247538 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:56:50.247594 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:56:50.262713 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:36011
I0217 11:56:50.263097 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:56:50.263692 100380 main.go:141] libmachine: Using API Version 1
I0217 11:56:50.263752 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:56:50.264059 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:56:50.264289 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:56:50.297981 100380 out.go:177] * Using the kvm2 driver based on existing profile
I0217 11:56:50.299143 100380 start.go:297] selected driver: kvm2
I0217 11:56:50.299155  100380 start.go:901] validating driver "kvm2" against &{Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime: ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0217 11:56:50.299304 100380 start.go:912] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0217 11:56:50.299646 100380 install.go:52] acquiring lock: {Name:mk900956b073697a4aa6c80a27c6bb0742a99a53 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0217 11:56:50.299706 100380 install.go:117] Validating docker-machine-driver-kvm2, PATH=/home/jenkins/minikube-integration/20427-77349/.minikube/bin:/home/jenkins/workspace/KVM_Linux_integration/out/:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/jenkins/go/bin:/usr/local/bin/:/usr/local/go/bin/:/home/jenkins/go/bin
I0217 11:56:50.314229 100380 install.go:137] /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2 version is 1.35.0
I0217 11:56:50.314917 100380 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0217 11:56:50.314949 100380 cni.go:84] Creating CNI manager for ""
I0217 11:56:50.315000 100380 cni.go:136] multinode detected (3 nodes found), recommending kindnet
I0217 11:56:50.315060 100380 start.go:340] cluster config:
{Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0217 11:56:50.315190 100380 iso.go:125] acquiring lock: {Name:mk4380b7bda8fcd8bced9705ff1695c3fb7dac0d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0217 11:56:50.317519 100380 out.go:177] * Starting "ha-783738" primary control-plane node in "ha-783738" cluster
I0217 11:56:50.318547 100380 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0217 11:56:50.318578 100380 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20427-77349/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4
I0217 11:56:50.318588 100380 cache.go:56] Caching tarball of preloaded images
I0217 11:56:50.318681 100380 preload.go:172] Found /home/jenkins/minikube-integration/20427-77349/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0217 11:56:50.318695 100380 cache.go:59] Finished verifying existence of preloaded tar for v1.32.1 on docker
I0217 11:56:50.318829 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:56:50.319009 100380 start.go:360] acquireMachinesLock for ha-783738: {Name:mk05ba8323ae77ab7dcc14c378d65810d956fdc0 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I0217 11:56:50.319055 100380 start.go:364] duration metric: took 23.519µs to acquireMachinesLock for "ha-783738"
I0217 11:56:50.319080 100380 start.go:96] Skipping create...Using existing machine configuration
I0217 11:56:50.319088 100380 fix.go:54] fixHost starting:
I0217 11:56:50.319353 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:56:50.319391 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:56:50.333761 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:34803
I0217 11:56:50.334152 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:56:50.334693 100380 main.go:141] libmachine: Using API Version 1
I0217 11:56:50.334714 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:56:50.335000 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:56:50.335210 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:56:50.335347 100380 main.go:141] libmachine: (ha-783738) Calling .GetState
I0217 11:56:50.336730 100380 fix.go:112] recreateIfNeeded on ha-783738: state=Stopped err=<nil>
I0217 11:56:50.336752 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
W0217 11:56:50.336864 100380 fix.go:138] unexpected machine state, will restart: <nil>
I0217 11:56:50.338814 100380 out.go:177] * Restarting existing kvm2 VM for "ha-783738" ...
I0217 11:56:50.340020 100380 main.go:141] libmachine: (ha-783738) Calling .Start
I0217 11:56:50.340200 100380 main.go:141] libmachine: (ha-783738) starting domain...
I0217 11:56:50.340221 100380 main.go:141] libmachine: (ha-783738) ensuring networks are active...
I0217 11:56:50.340845 100380 main.go:141] libmachine: (ha-783738) Ensuring network default is active
I0217 11:56:50.341268 100380 main.go:141] libmachine: (ha-783738) Ensuring network mk-ha-783738 is active
I0217 11:56:50.341612 100380 main.go:141] libmachine: (ha-783738) getting domain XML...
I0217 11:56:50.342286 100380 main.go:141] libmachine: (ha-783738) creating domain...
I0217 11:56:51.533335 100380 main.go:141] libmachine: (ha-783738) waiting for IP...
I0217 11:56:51.534198 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:51.534571 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:51.534631 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:51.534554 100416 retry.go:31] will retry after 214.112758ms: waiting for domain to come up
I0217 11:56:51.750038 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:51.750535 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:51.750587 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:51.750528 100416 retry.go:31] will retry after 287.575076ms: waiting for domain to come up
I0217 11:56:52.040019 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:52.040473 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:52.040515 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:52.040452 100416 retry.go:31] will retry after 303.389275ms: waiting for domain to come up
I0217 11:56:52.345057 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:52.345400 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:52.345452 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:52.345383 100416 retry.go:31] will retry after 580.610288ms: waiting for domain to come up
I0217 11:56:52.927102 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:52.927623 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:52.927663 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:52.927596 100416 retry.go:31] will retry after 470.88869ms: waiting for domain to come up
I0217 11:56:53.400293 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:53.400698 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:53.400725 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:53.400636 100416 retry.go:31] will retry after 645.102407ms: waiting for domain to come up
I0217 11:56:54.046798 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:54.047309 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:54.047365 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:54.047265 100416 retry.go:31] will retry after 993.016218ms: waiting for domain to come up
I0217 11:56:55.041450 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:55.041808 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:55.041828 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:55.041790 100416 retry.go:31] will retry after 1.096274529s: waiting for domain to come up
I0217 11:56:56.139475 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:56.139892 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:56.139957 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:56.139882 100416 retry.go:31] will retry after 1.840421804s: waiting for domain to come up
I0217 11:56:57.981618 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:57.982040 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:57.982068 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:57.981979 100416 retry.go:31] will retry after 1.8969141s: waiting for domain to come up
I0217 11:56:59.881026 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:59.881535 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:59.881570 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:59.881471 100416 retry.go:31] will retry after 1.890240518s: waiting for domain to come up
I0217 11:57:01.773274 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:01.773728 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:57:01.773779 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:57:01.773696 100416 retry.go:31] will retry after 3.046762911s: waiting for domain to come up
I0217 11:57:04.823999 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:04.824458 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:57:04.824497 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:57:04.824453 100416 retry.go:31] will retry after 3.819063496s: waiting for domain to come up
I0217 11:57:08.647831 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.648309 100380 main.go:141] libmachine: (ha-783738) found domain IP: 192.168.39.249
I0217 11:57:08.648334 100380 main.go:141] libmachine: (ha-783738) reserving static IP address...
I0217 11:57:08.648347 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has current primary IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.648799 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "ha-783738", mac: "52:54:00:fb:6f:65", ip: "192.168.39.249"} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.648824 100380 main.go:141] libmachine: (ha-783738) DBG | skip adding static IP to network mk-ha-783738 - found existing host DHCP lease matching {name: "ha-783738", mac: "52:54:00:fb:6f:65", ip: "192.168.39.249"}
I0217 11:57:08.648835 100380 main.go:141] libmachine: (ha-783738) reserved static IP address 192.168.39.249 for domain ha-783738
I0217 11:57:08.648846 100380 main.go:141] libmachine: (ha-783738) waiting for SSH...
I0217 11:57:08.648862 100380 main.go:141] libmachine: (ha-783738) DBG | Getting to WaitForSSH function...
I0217 11:57:08.650828 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.651193 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.651224 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.651387 100380 main.go:141] libmachine: (ha-783738) DBG | Using SSH client type: external
I0217 11:57:08.651414 100380 main.go:141] libmachine: (ha-783738) DBG | Using SSH private key: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa (-rw-------)
I0217 11:57:08.651435 100380 main.go:141] libmachine: (ha-783738) DBG | &{[-F /dev/null -o ConnectionAttempts=3 -o ConnectTimeout=10 -o ControlMaster=no -o ControlPath=none -o LogLevel=quiet -o PasswordAuthentication=no -o ServerAliveInterval=60 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null docker@192.168.39.249 -o IdentitiesOnly=yes -i /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa -p 22] /usr/bin/ssh <nil>}
I0217 11:57:08.651464 100380 main.go:141] libmachine: (ha-783738) DBG | About to run SSH command:
I0217 11:57:08.651480 100380 main.go:141] libmachine: (ha-783738) DBG | exit 0
I0217 11:57:08.776922 100380 main.go:141] libmachine: (ha-783738) DBG | SSH cmd err, output: <nil>:
I0217 11:57:08.777326 100380 main.go:141] libmachine: (ha-783738) Calling .GetConfigRaw
I0217 11:57:08.777959 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:08.780301 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.780692 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.780735 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.780948 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:08.781137 100380 machine.go:93] provisionDockerMachine start ...
I0217 11:57:08.781154 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:08.781442 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:08.783478 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.783868 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.783897 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.784048 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:08.784237 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.784393 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.784570 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:08.784738 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:08.784917 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:08.784928 100380 main.go:141] libmachine: About to run SSH command:
hostname
I0217 11:57:08.889484 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: minikube
I0217 11:57:08.889525 100380 main.go:141] libmachine: (ha-783738) Calling .GetMachineName
I0217 11:57:08.889783 100380 buildroot.go:166] provisioning hostname "ha-783738"
I0217 11:57:08.889818 100380 main.go:141] libmachine: (ha-783738) Calling .GetMachineName
I0217 11:57:08.890003 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:08.892666 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.893027 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.893060 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.893202 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:08.893391 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.893536 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.893661 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:08.893787 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:08.893949 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:08.893960 100380 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-783738 && echo "ha-783738" | sudo tee /etc/hostname
I0217 11:57:09.014626 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-783738
I0217 11:57:09.014653 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.017274 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.017710 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.017744 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.017939 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.018131 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.018348 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.018473 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.018701 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.018967 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.018994 100380 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-783738' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-783738/g' /etc/hosts;
else
echo '127.0.1.1 ha-783738' | sudo tee -a /etc/hosts;
fi
fi
I0217 11:57:09.133208 100380 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0217 11:57:09.133247 100380 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/20427-77349/.minikube CaCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20427-77349/.minikube}
I0217 11:57:09.133278 100380 buildroot.go:174] setting up certificates
I0217 11:57:09.133295 100380 provision.go:84] configureAuth start
I0217 11:57:09.133331 100380 main.go:141] libmachine: (ha-783738) Calling .GetMachineName
I0217 11:57:09.133680 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:09.136393 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.136746 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.136771 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.136918 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.139192 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.139545 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.139583 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.139699 100380 provision.go:143] copyHostCerts
I0217 11:57:09.139734 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:09.139786 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem, removing ...
I0217 11:57:09.139804 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:09.139883 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem (1082 bytes)
I0217 11:57:09.139996 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:09.140023 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem, removing ...
I0217 11:57:09.140030 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:09.140079 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem (1123 bytes)
I0217 11:57:09.140159 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:09.140184 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem, removing ...
I0217 11:57:09.140191 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:09.140228 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem (1675 bytes)
I0217 11:57:09.140314 100380 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem org=jenkins.ha-783738 san=[127.0.0.1 192.168.39.249 ha-783738 localhost minikube]
I0217 11:57:09.269804 100380 provision.go:177] copyRemoteCerts
I0217 11:57:09.269900 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0217 11:57:09.269935 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.272592 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.272916 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.272945 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.273095 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.273282 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.273464 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.273600 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:09.355256 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0217 11:57:09.355331 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0217 11:57:09.378132 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem -> /etc/docker/server.pem
I0217 11:57:09.378243 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0217 11:57:09.399749 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0217 11:57:09.399830 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0217 11:57:09.421183 100380 provision.go:87] duration metric: took 287.855291ms to configureAuth
I0217 11:57:09.421207 100380 buildroot.go:189] setting minikube options for container-runtime
I0217 11:57:09.421432 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:09.421460 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:09.421765 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.424701 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.425141 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.425173 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.425370 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.425557 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.425734 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.425883 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.426059 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.426283 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.426297 100380 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0217 11:57:09.534976 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: tmpfs
I0217 11:57:09.535006 100380 buildroot.go:70] root file system type: tmpfs
I0217 11:57:09.535125 100380 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0217 11:57:09.535163 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.537739 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.538108 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.538126 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.538307 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.538481 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.538662 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.538802 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.538949 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.539142 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.539243 100380 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0217 11:57:09.658326 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0217 11:57:09.658371 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.661372 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.661838 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.661875 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.662085 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.662300 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.662435 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.662559 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.662707 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.662897 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.662913 100380 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0217 11:57:11.588699 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
I0217 11:57:11.588766 100380 machine.go:96] duration metric: took 2.807616414s to provisionDockerMachine
I0217 11:57:11.588782 100380 start.go:293] postStartSetup for "ha-783738" (driver="kvm2")
I0217 11:57:11.588792 100380 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0217 11:57:11.588810 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.589177 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0217 11:57:11.589221 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.592192 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.592596 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.592627 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.592785 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.592979 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.593170 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.593334 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:11.675232 100380 ssh_runner.go:195] Run: cat /etc/os-release
I0217 11:57:11.679319 100380 info.go:137] Remote host: Buildroot 2023.02.9
I0217 11:57:11.679347 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/addons for local assets ...
I0217 11:57:11.679434 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/files for local assets ...
I0217 11:57:11.679553 100380 filesync.go:149] local asset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> 845022.pem in /etc/ssl/certs
I0217 11:57:11.679569 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> /etc/ssl/certs/845022.pem
I0217 11:57:11.679700 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0217 11:57:11.688596 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem --> /etc/ssl/certs/845022.pem (1708 bytes)
I0217 11:57:11.712948 100380 start.go:296] duration metric: took 124.147315ms for postStartSetup
I0217 11:57:11.713041 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.713388 100380 ssh_runner.go:195] Run: sudo ls --almost-all -1 /var/lib/minikube/backup
I0217 11:57:11.713431 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.716109 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.716482 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.716509 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.716697 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.716902 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.717111 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.717237 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:11.799568 100380 machine.go:197] restoring vm config from /var/lib/minikube/backup: [etc]
I0217 11:57:11.799647 100380 ssh_runner.go:195] Run: sudo rsync --archive --update /var/lib/minikube/backup/etc /
I0217 11:57:11.840659 100380 fix.go:56] duration metric: took 21.521561421s for fixHost
I0217 11:57:11.840710 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.843711 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.844159 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.844211 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.844334 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.844538 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.844685 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.844877 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.845064 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:11.845292 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:11.845324 100380 main.go:141] libmachine: About to run SSH command:
date +%s.%N
I0217 11:57:11.961693 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: 1739793431.919777749
I0217 11:57:11.961720 100380 fix.go:216] guest clock: 1739793431.919777749
I0217 11:57:11.961728 100380 fix.go:229] Guest: 2025-02-17 11:57:11.919777749 +0000 UTC Remote: 2025-02-17 11:57:11.840688548 +0000 UTC m=+21.663425668 (delta=79.089201ms)
I0217 11:57:11.961764 100380 fix.go:200] guest clock delta is within tolerance: 79.089201ms
I0217 11:57:11.961771 100380 start.go:83] releasing machines lock for "ha-783738", held for 21.642703542s
I0217 11:57:11.961797 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.962076 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:11.964739 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.965072 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.965098 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.965245 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.965780 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.965938 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.966020 100380 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0217 11:57:11.966085 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.966153 100380 ssh_runner.go:195] Run: cat /version.json
I0217 11:57:11.966182 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.968710 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.968814 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.969180 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.969211 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.969228 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.969243 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.969400 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.969505 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.969573 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.969654 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.969705 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.969780 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.969855 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:11.969896 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:12.070993 100380 ssh_runner.go:195] Run: systemctl --version
I0217 11:57:12.076962 100380 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W0217 11:57:12.082069 100380 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0217 11:57:12.082164 100380 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0217 11:57:12.097308 100380 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I0217 11:57:12.097353 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:12.097502 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:12.116857 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0217 11:57:12.128177 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0217 11:57:12.139383 100380 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0217 11:57:12.139433 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0217 11:57:12.150535 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:12.161824 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0217 11:57:12.173075 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:12.184735 100380 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0217 11:57:12.196065 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0217 11:57:12.206061 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0217 11:57:12.215826 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0217 11:57:12.225719 100380 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0217 11:57:12.234589 100380 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0217 11:57:12.234644 100380 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0217 11:57:12.244581 100380 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0217 11:57:12.253602 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:12.359116 100380 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0217 11:57:12.382906 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:12.383010 100380 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0217 11:57:12.408300 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:12.424027 100380 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0217 11:57:12.444833 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:12.457628 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:12.470140 100380 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0217 11:57:12.497764 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:12.511071 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:12.529141 100380 ssh_runner.go:195] Run: which cri-dockerd
I0217 11:57:12.532846 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0217 11:57:12.541895 100380 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0217 11:57:12.557198 100380 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0217 11:57:12.670128 100380 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0217 11:57:12.796263 100380 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0217 11:57:12.796399 100380 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0217 11:57:12.812229 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:12.923350 100380 ssh_runner.go:195] Run: sudo systemctl restart docker
I0217 11:57:15.351609 100380 ssh_runner.go:235] Completed: sudo systemctl restart docker: (2.428206669s)
I0217 11:57:15.351699 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0217 11:57:15.364852 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0217 11:57:15.377423 100380 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0217 11:57:15.493635 100380 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0217 11:57:15.621524 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:15.730858 100380 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0217 11:57:15.748138 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0217 11:57:15.761818 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:15.881775 100380 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0217 11:57:15.960772 100380 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0217 11:57:15.960858 100380 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0217 11:57:15.966411 100380 start.go:563] Will wait 60s for crictl version
I0217 11:57:15.966517 100380 ssh_runner.go:195] Run: which crictl
I0217 11:57:15.974036 100380 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0217 11:57:16.011837 100380 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.4.0
RuntimeApiVersion: v1
I0217 11:57:16.011912 100380 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0217 11:57:16.036945 100380 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0217 11:57:16.060974 100380 out.go:235] * Preparing Kubernetes v1.32.1 on Docker 27.4.0 ...
I0217 11:57:16.061031 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:16.063810 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:16.064255 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:16.064298 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:16.064499 100380 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I0217 11:57:16.068464 100380 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.39.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0217 11:57:16.080668 100380 kubeadm.go:883] updating cluster {Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0217 11:57:16.080804 100380 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0217 11:57:16.080849 100380 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0217 11:57:16.098890 100380 docker.go:689] Got preloaded images: -- stdout --
kindest/kindnetd:v20250214-acbabc1a
registry.k8s.io/kube-apiserver:v1.32.1
registry.k8s.io/kube-scheduler:v1.32.1
registry.k8s.io/kube-controller-manager:v1.32.1
registry.k8s.io/kube-proxy:v1.32.1
ghcr.io/kube-vip/kube-vip:v0.8.9
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28
-- /stdout --
I0217 11:57:16.098911 100380 docker.go:619] Images already preloaded, skipping extraction
I0217 11:57:16.098974 100380 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0217 11:57:16.116506 100380 docker.go:689] Got preloaded images: -- stdout --
kindest/kindnetd:v20250214-acbabc1a
registry.k8s.io/kube-apiserver:v1.32.1
registry.k8s.io/kube-scheduler:v1.32.1
registry.k8s.io/kube-controller-manager:v1.32.1
registry.k8s.io/kube-proxy:v1.32.1
ghcr.io/kube-vip/kube-vip:v0.8.9
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28
-- /stdout --
I0217 11:57:16.116540 100380 cache_images.go:84] Images are preloaded, skipping loading
I0217 11:57:16.116556 100380 kubeadm.go:934] updating node { 192.168.39.249 8443 v1.32.1 docker true true} ...
I0217 11:57:16.116703 100380 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-783738 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.249
[Install]
config:
{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0217 11:57:16.116764 100380 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0217 11:57:16.164431 100380 cni.go:84] Creating CNI manager for ""
I0217 11:57:16.164455 100380 cni.go:136] multinode detected (3 nodes found), recommending kindnet
I0217 11:57:16.164469 100380 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0217 11:57:16.164499 100380 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.39.249 APIServerPort:8443 KubernetesVersion:v1.32.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-783738 NodeName:ha-783738 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.249"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.249 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0217 11:57:16.164682 100380 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.249
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "ha-783738"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.39.249"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.249"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.32.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0217 11:57:16.164704 100380 kube-vip.go:115] generating kube-vip config ...
I0217 11:57:16.164766 100380 ssh_runner.go:195] Run: sudo sh -c "modprobe --all ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack"
I0217 11:57:16.178981 100380 kube-vip.go:167] auto-enabling control-plane load-balancing in kube-vip
I0217 11:57:16.179102 100380 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.39.254
- name: prometheus_server
value: :2112
- name: lb_enable
value: "true"
- name: lb_port
value: "8443"
image: ghcr.io/kube-vip/kube-vip:v0.8.9
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
I0217 11:57:16.179161 100380 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.1
I0217 11:57:16.189237 100380 binaries.go:44] Found k8s binaries, skipping transfer
I0217 11:57:16.189321 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0217 11:57:16.198727 100380 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (310 bytes)
I0217 11:57:16.214787 100380 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0217 11:57:16.231014 100380 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2293 bytes)
I0217 11:57:16.246729 100380 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1441 bytes)
I0217 11:57:16.261779 100380 ssh_runner.go:195] Run: grep 192.168.39.254 control-plane.minikube.internal$ /etc/hosts
I0217 11:57:16.265453 100380 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.39.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0217 11:57:16.276521 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:16.384249 100380 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0217 11:57:16.401291 100380 certs.go:68] Setting up /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738 for IP: 192.168.39.249
I0217 11:57:16.401328 100380 certs.go:194] generating shared ca certs ...
I0217 11:57:16.401350 100380 certs.go:226] acquiring lock for ca certs: {Name:mk7093571229e43ae88bf2507ccc9fd2cd05388e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:16.401508 100380 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.key
I0217 11:57:16.401544 100380 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.key
I0217 11:57:16.401555 100380 certs.go:256] generating profile certs ...
I0217 11:57:16.401635 100380 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/client.key
I0217 11:57:16.401660 100380 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b
I0217 11:57:16.401671 100380 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.249 192.168.39.31 192.168.39.254]
I0217 11:57:16.475033 100380 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b ...
I0217 11:57:16.475062 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b: {Name:mkcae1f9f128e66451afcd5b133e6826e9862cbe Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:16.475228 100380 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b ...
I0217 11:57:16.475243 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b: {Name:mk484c481609a3c2ed473dfecb8f5468118b1367 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:16.475330 100380 certs.go:381] copying /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b -> /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt
I0217 11:57:16.475492 100380 certs.go:385] copying /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b -> /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key
I0217 11:57:16.475629 100380 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.key
I0217 11:57:16.475644 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0217 11:57:16.475656 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0217 11:57:16.475671 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0217 11:57:16.475699 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0217 11:57:16.475714 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0217 11:57:16.475726 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0217 11:57:16.475737 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0217 11:57:16.475748 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0217 11:57:16.475800 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502.pem (1338 bytes)
W0217 11:57:16.475831 100380 certs.go:480] ignoring /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502_empty.pem, impossibly tiny 0 bytes
I0217 11:57:16.475839 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem (1679 bytes)
I0217 11:57:16.475861 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem (1082 bytes)
I0217 11:57:16.475900 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem (1123 bytes)
I0217 11:57:16.475927 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem (1675 bytes)
I0217 11:57:16.476002 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem (1708 bytes)
I0217 11:57:16.476031 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502.pem -> /usr/share/ca-certificates/84502.pem
I0217 11:57:16.476046 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> /usr/share/ca-certificates/845022.pem
I0217 11:57:16.476058 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:16.476652 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0217 11:57:16.507138 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0217 11:57:16.534527 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0217 11:57:16.562922 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0217 11:57:16.587311 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I0217 11:57:16.624087 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0217 11:57:16.662037 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0217 11:57:16.713619 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0217 11:57:16.756345 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502.pem --> /usr/share/ca-certificates/84502.pem (1338 bytes)
I0217 11:57:16.803520 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem --> /usr/share/ca-certificates/845022.pem (1708 bytes)
I0217 11:57:16.846879 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0217 11:57:16.920267 100380 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0217 11:57:16.950648 100380 ssh_runner.go:195] Run: openssl version
I0217 11:57:16.958784 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/84502.pem && ln -fs /usr/share/ca-certificates/84502.pem /etc/ssl/certs/84502.pem"
I0217 11:57:16.987238 100380 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/84502.pem
I0217 11:57:16.994220 100380 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Feb 17 11:42 /usr/share/ca-certificates/84502.pem
I0217 11:57:16.994283 100380 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/84502.pem
I0217 11:57:17.016466 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/84502.pem /etc/ssl/certs/51391683.0"
I0217 11:57:17.039972 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/845022.pem && ln -fs /usr/share/ca-certificates/845022.pem /etc/ssl/certs/845022.pem"
I0217 11:57:17.061818 100380 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/845022.pem
I0217 11:57:17.068988 100380 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Feb 17 11:42 /usr/share/ca-certificates/845022.pem
I0217 11:57:17.069057 100380 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/845022.pem
I0217 11:57:17.075953 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/845022.pem /etc/ssl/certs/3ec20f2e.0"
I0217 11:57:17.094161 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0217 11:57:17.111313 100380 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:17.116268 100380 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Feb 17 11:35 /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:17.116335 100380 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:17.122743 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0217 11:57:17.141827 100380 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0217 11:57:17.146771 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I0217 11:57:17.158301 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I0217 11:57:17.170200 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I0217 11:57:17.177413 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I0217 11:57:17.186556 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I0217 11:57:17.193933 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
I0217 11:57:17.203839 100380 kubeadm.go:392] StartCluster: {Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0217 11:57:17.204089 100380 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0217 11:57:17.225257 100380 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0217 11:57:17.236858 100380 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I0217 11:57:17.236876 100380 kubeadm.go:593] restartPrimaryControlPlane start ...
I0217 11:57:17.236920 100380 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0217 11:57:17.246285 100380 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0217 11:57:17.246828 100380 kubeconfig.go:47] verify endpoint returned: get endpoint: "ha-783738" does not appear in /home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:57:17.246986 100380 kubeconfig.go:62] /home/jenkins/minikube-integration/20427-77349/kubeconfig needs updating (will repair): [kubeconfig missing "ha-783738" cluster setting kubeconfig missing "ha-783738" context setting]
I0217 11:57:17.247367 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/kubeconfig: {Name:mka23a5c17f10bb58374e83755a2ac6a44464e11 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:17.247895 100380 loader.go:402] Config loaded from file: /home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:57:17.248117 100380 kapi.go:59] client config for ha-783738: &rest.Config{Host:"https://192.168.39.249:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/client.crt", KeyFile:"/home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/client.key", CAFile:"/home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x24df700), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0217 11:57:17.248591 100380 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0217 11:57:17.248610 100380 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0217 11:57:17.248615 100380 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0217 11:57:17.248619 100380 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0217 11:57:17.248634 100380 cert_rotation.go:140] Starting client certificate rotation controller
I0217 11:57:17.249054 100380 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0217 11:57:17.258029 100380 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.39.249
I0217 11:57:17.258053 100380 kubeadm.go:597] duration metric: took 21.170416ms to restartPrimaryControlPlane
I0217 11:57:17.258062 100380 kubeadm.go:394] duration metric: took 54.240079ms to StartCluster
I0217 11:57:17.258077 100380 settings.go:142] acquiring lock: {Name:mkf730c657b1c2d5a481dbeb02dabe7dfa17f2d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:17.258150 100380 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:57:17.258639 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/kubeconfig: {Name:mka23a5c17f10bb58374e83755a2ac6a44464e11 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:17.258848 100380 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0217 11:57:17.258870 100380 start.go:241] waiting for startup goroutines ...
I0217 11:57:17.258884 100380 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0217 11:57:17.259112 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:17.261397 100380 out.go:177] * Enabled addons:
I0217 11:57:17.262668 100380 addons.go:514] duration metric: took 3.785415ms for enable addons: enabled=[]
I0217 11:57:17.262703 100380 start.go:246] waiting for cluster config update ...
I0217 11:57:17.262713 100380 start.go:255] writing updated cluster config ...
I0217 11:57:17.264127 100380 out.go:201]
I0217 11:57:17.265577 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:17.265703 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:17.267570 100380 out.go:177] * Starting "ha-783738-m02" control-plane node in "ha-783738" cluster
I0217 11:57:17.268921 100380 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0217 11:57:17.268950 100380 cache.go:56] Caching tarball of preloaded images
I0217 11:57:17.269061 100380 preload.go:172] Found /home/jenkins/minikube-integration/20427-77349/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0217 11:57:17.269074 100380 cache.go:59] Finished verifying existence of preloaded tar for v1.32.1 on docker
I0217 11:57:17.269250 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:17.269484 100380 start.go:360] acquireMachinesLock for ha-783738-m02: {Name:mk05ba8323ae77ab7dcc14c378d65810d956fdc0 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I0217 11:57:17.269554 100380 start.go:364] duration metric: took 46.103µs to acquireMachinesLock for "ha-783738-m02"
I0217 11:57:17.269576 100380 start.go:96] Skipping create...Using existing machine configuration
I0217 11:57:17.269584 100380 fix.go:54] fixHost starting: m02
I0217 11:57:17.269846 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:57:17.269891 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:57:17.284961 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:45093
I0217 11:57:17.285438 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:57:17.285964 100380 main.go:141] libmachine: Using API Version 1
I0217 11:57:17.285991 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:57:17.286358 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:57:17.286562 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:17.286744 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetState
I0217 11:57:17.288288 100380 fix.go:112] recreateIfNeeded on ha-783738-m02: state=Stopped err=<nil>
I0217 11:57:17.288317 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
W0217 11:57:17.288473 100380 fix.go:138] unexpected machine state, will restart: <nil>
I0217 11:57:17.290496 100380 out.go:177] * Restarting existing kvm2 VM for "ha-783738-m02" ...
I0217 11:57:17.291737 100380 main.go:141] libmachine: (ha-783738-m02) Calling .Start
I0217 11:57:17.291936 100380 main.go:141] libmachine: (ha-783738-m02) starting domain...
I0217 11:57:17.291957 100380 main.go:141] libmachine: (ha-783738-m02) ensuring networks are active...
I0217 11:57:17.292625 100380 main.go:141] libmachine: (ha-783738-m02) Ensuring network default is active
I0217 11:57:17.292935 100380 main.go:141] libmachine: (ha-783738-m02) Ensuring network mk-ha-783738 is active
I0217 11:57:17.293260 100380 main.go:141] libmachine: (ha-783738-m02) getting domain XML...
I0217 11:57:17.293893 100380 main.go:141] libmachine: (ha-783738-m02) creating domain...
I0217 11:57:18.506378 100380 main.go:141] libmachine: (ha-783738-m02) waiting for IP...
I0217 11:57:18.507364 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:18.507881 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:18.507974 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:18.507878 100573 retry.go:31] will retry after 190.071186ms: waiting for domain to come up
I0217 11:57:18.699203 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:18.699617 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:18.699682 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:18.699590 100573 retry.go:31] will retry after 254.022024ms: waiting for domain to come up
I0217 11:57:18.955132 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:18.955578 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:18.955602 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:18.955533 100573 retry.go:31] will retry after 332.594264ms: waiting for domain to come up
I0217 11:57:19.290041 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:19.290494 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:19.290519 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:19.290472 100573 retry.go:31] will retry after 550.484931ms: waiting for domain to come up
I0217 11:57:19.842363 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:19.842844 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:19.842873 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:19.842822 100573 retry.go:31] will retry after 743.60757ms: waiting for domain to come up
I0217 11:57:20.587667 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:20.588025 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:20.588058 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:20.587981 100573 retry.go:31] will retry after 701.750144ms: waiting for domain to come up
I0217 11:57:21.290980 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:21.291500 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:21.291530 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:21.291445 100573 retry.go:31] will retry after 755.313925ms: waiting for domain to come up
I0217 11:57:22.047876 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:22.048286 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:22.048318 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:22.048246 100573 retry.go:31] will retry after 1.338224716s: waiting for domain to come up
I0217 11:57:23.388238 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:23.388759 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:23.388796 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:23.388727 100573 retry.go:31] will retry after 1.367661407s: waiting for domain to come up
I0217 11:57:24.758376 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:24.758722 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:24.758764 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:24.758718 100573 retry.go:31] will retry after 2.08548116s: waiting for domain to come up
I0217 11:57:26.846621 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:26.847150 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:26.847253 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:26.847166 100573 retry.go:31] will retry after 1.933968455s: waiting for domain to come up
I0217 11:57:28.782369 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:28.782785 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:28.782815 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:28.782752 100573 retry.go:31] will retry after 3.162167749s: waiting for domain to come up
I0217 11:57:31.947188 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:31.947578 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:31.947603 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:31.947545 100573 retry.go:31] will retry after 3.924986004s: waiting for domain to come up
I0217 11:57:35.877102 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.877437 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has current primary IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.877460 100380 main.go:141] libmachine: (ha-783738-m02) found domain IP: 192.168.39.31
I0217 11:57:35.877473 100380 main.go:141] libmachine: (ha-783738-m02) reserving static IP address...
I0217 11:57:35.877915 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "ha-783738-m02", mac: "52:54:00:06:81:a2", ip: "192.168.39.31"} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:35.877942 100380 main.go:141] libmachine: (ha-783738-m02) DBG | skip adding static IP to network mk-ha-783738 - found existing host DHCP lease matching {name: "ha-783738-m02", mac: "52:54:00:06:81:a2", ip: "192.168.39.31"}
I0217 11:57:35.877960 100380 main.go:141] libmachine: (ha-783738-m02) reserved static IP address 192.168.39.31 for domain ha-783738-m02
I0217 11:57:35.877972 100380 main.go:141] libmachine: (ha-783738-m02) waiting for SSH...
I0217 11:57:35.877983 100380 main.go:141] libmachine: (ha-783738-m02) DBG | Getting to WaitForSSH function...
I0217 11:57:35.880382 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.880801 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:35.880830 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.880903 100380 main.go:141] libmachine: (ha-783738-m02) DBG | Using SSH client type: external
I0217 11:57:35.880925 100380 main.go:141] libmachine: (ha-783738-m02) DBG | Using SSH private key: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa (-rw-------)
I0217 11:57:35.880955 100380 main.go:141] libmachine: (ha-783738-m02) DBG | &{[-F /dev/null -o ConnectionAttempts=3 -o ConnectTimeout=10 -o ControlMaster=no -o ControlPath=none -o LogLevel=quiet -o PasswordAuthentication=no -o ServerAliveInterval=60 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null docker@192.168.39.31 -o IdentitiesOnly=yes -i /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa -p 22] /usr/bin/ssh <nil>}
I0217 11:57:35.880970 100380 main.go:141] libmachine: (ha-783738-m02) DBG | About to run SSH command:
I0217 11:57:35.880982 100380 main.go:141] libmachine: (ha-783738-m02) DBG | exit 0
I0217 11:57:36.005182 100380 main.go:141] libmachine: (ha-783738-m02) DBG | SSH cmd err, output: <nil>:
I0217 11:57:36.005527 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetConfigRaw
I0217 11:57:36.006216 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetIP
I0217 11:57:36.008704 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.009084 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.009118 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.009443 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:36.009639 100380 machine.go:93] provisionDockerMachine start ...
I0217 11:57:36.009657 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:36.009816 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.011849 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.012187 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.012218 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.012360 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.012557 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.012710 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.012836 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.012947 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.013115 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.013130 100380 main.go:141] libmachine: About to run SSH command:
hostname
I0217 11:57:36.113056 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: minikube
I0217 11:57:36.113093 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetMachineName
I0217 11:57:36.113376 100380 buildroot.go:166] provisioning hostname "ha-783738-m02"
I0217 11:57:36.113403 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetMachineName
I0217 11:57:36.113566 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.116233 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.116606 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.116634 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.116762 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.116907 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.117025 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.117242 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.117464 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.117681 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.117699 100380 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-783738-m02 && echo "ha-783738-m02" | sudo tee /etc/hostname
I0217 11:57:36.230628 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-783738-m02
I0217 11:57:36.230670 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.233644 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.233991 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.234015 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.234196 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.234491 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.234686 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.234856 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.235006 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.235194 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.235211 100380 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-783738-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-783738-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-783738-m02' | sudo tee -a /etc/hosts;
fi
fi
I0217 11:57:36.341290 100380 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0217 11:57:36.341332 100380 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/20427-77349/.minikube CaCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20427-77349/.minikube}
I0217 11:57:36.341348 100380 buildroot.go:174] setting up certificates
I0217 11:57:36.341360 100380 provision.go:84] configureAuth start
I0217 11:57:36.341373 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetMachineName
I0217 11:57:36.341646 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetIP
I0217 11:57:36.344453 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.344944 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.344981 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.345158 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.347416 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.347719 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.347744 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.347910 100380 provision.go:143] copyHostCerts
I0217 11:57:36.347943 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:36.347989 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem, removing ...
I0217 11:57:36.347999 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:36.348065 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem (1082 bytes)
I0217 11:57:36.348156 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:36.348190 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem, removing ...
I0217 11:57:36.348200 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:36.348229 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem (1123 bytes)
I0217 11:57:36.348286 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:36.348310 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem, removing ...
I0217 11:57:36.348320 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:36.348347 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem (1675 bytes)
I0217 11:57:36.348413 100380 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem org=jenkins.ha-783738-m02 san=[127.0.0.1 192.168.39.31 ha-783738-m02 localhost minikube]
I0217 11:57:36.476199 100380 provision.go:177] copyRemoteCerts
I0217 11:57:36.476256 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0217 11:57:36.476280 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.479126 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.479497 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.479529 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.479677 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.479868 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.480073 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.480258 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:36.558954 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0217 11:57:36.559023 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0217 11:57:36.581755 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0217 11:57:36.581816 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0217 11:57:36.604328 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem -> /etc/docker/server.pem
I0217 11:57:36.604411 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0217 11:57:36.626183 100380 provision.go:87] duration metric: took 284.807453ms to configureAuth
I0217 11:57:36.626219 100380 buildroot.go:189] setting minikube options for container-runtime
I0217 11:57:36.626492 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:36.626522 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:36.626768 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.629194 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.629569 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.629594 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.629740 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.629904 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.630077 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.630201 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.630389 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.630601 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.630614 100380 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0217 11:57:36.730964 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: tmpfs
I0217 11:57:36.730995 100380 buildroot.go:70] root file system type: tmpfs
I0217 11:57:36.731148 100380 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0217 11:57:36.731184 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.733718 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.734119 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.734150 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.734340 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.734539 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.734714 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.734847 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.734986 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.735198 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.735304 100380 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment="NO_PROXY=192.168.39.249"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0217 11:57:36.846599 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment=NO_PROXY=192.168.39.249
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0217 11:57:36.846633 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.849370 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.849714 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.849733 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.849923 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.850116 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.850290 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.850443 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.850608 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.850788 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.850805 100380 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0217 11:57:38.700010 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
I0217 11:57:38.700036 100380 machine.go:96] duration metric: took 2.690384734s to provisionDockerMachine
I0217 11:57:38.700051 100380 start.go:293] postStartSetup for "ha-783738-m02" (driver="kvm2")
I0217 11:57:38.700060 100380 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0217 11:57:38.700075 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:38.700389 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0217 11:57:38.700425 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:38.703068 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.703435 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:38.703465 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.703605 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:38.703807 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.703952 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:38.704102 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:38.783381 100380 ssh_runner.go:195] Run: cat /etc/os-release
I0217 11:57:38.787188 100380 info.go:137] Remote host: Buildroot 2023.02.9
I0217 11:57:38.787215 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/addons for local assets ...
I0217 11:57:38.787270 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/files for local assets ...
I0217 11:57:38.787341 100380 filesync.go:149] local asset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> 845022.pem in /etc/ssl/certs
I0217 11:57:38.787352 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> /etc/ssl/certs/845022.pem
I0217 11:57:38.787430 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0217 11:57:38.796091 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem --> /etc/ssl/certs/845022.pem (1708 bytes)
I0217 11:57:38.817716 100380 start.go:296] duration metric: took 117.649565ms for postStartSetup
I0217 11:57:38.817759 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:38.818052 100380 ssh_runner.go:195] Run: sudo ls --almost-all -1 /var/lib/minikube/backup
I0217 11:57:38.818087 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:38.820354 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.820669 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:38.820694 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.820809 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:38.820978 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.821138 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:38.821273 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:38.900214 100380 machine.go:197] restoring vm config from /var/lib/minikube/backup: [etc]
I0217 11:57:38.900294 100380 ssh_runner.go:195] Run: sudo rsync --archive --update /var/lib/minikube/backup/etc /
I0217 11:57:38.959273 100380 fix.go:56] duration metric: took 21.689681729s for fixHost
I0217 11:57:38.959327 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:38.961853 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.962326 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:38.962364 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.962591 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:38.962788 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.962952 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.963062 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:38.963238 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:38.963408 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:38.963419 100380 main.go:141] libmachine: About to run SSH command:
date +%s.%N
I0217 11:57:39.071315 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: 1739793459.049434891
I0217 11:57:39.071339 100380 fix.go:216] guest clock: 1739793459.049434891
I0217 11:57:39.071349 100380 fix.go:229] Guest: 2025-02-17 11:57:39.049434891 +0000 UTC Remote: 2025-02-17 11:57:38.959302801 +0000 UTC m=+48.782039917 (delta=90.13209ms)
I0217 11:57:39.071366 100380 fix.go:200] guest clock delta is within tolerance: 90.13209ms
I0217 11:57:39.071371 100380 start.go:83] releasing machines lock for "ha-783738-m02", held for 21.801804436s
I0217 11:57:39.071393 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.071600 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetIP
I0217 11:57:39.074321 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.074707 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:39.074736 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.076949 100380 out.go:177] * Found network options:
I0217 11:57:39.078428 100380 out.go:177] - NO_PROXY=192.168.39.249
W0217 11:57:39.079686 100380 proxy.go:119] fail to check proxy env: Error ip not in block
I0217 11:57:39.079714 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.080218 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.080403 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.080510 100380 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0217 11:57:39.080551 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
W0217 11:57:39.080631 100380 proxy.go:119] fail to check proxy env: Error ip not in block
I0217 11:57:39.080722 100380 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0217 11:57:39.080748 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:39.083432 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.083453 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.083887 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:39.083914 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:39.083933 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.083949 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.084264 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:39.084411 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:39.084597 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:39.084609 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:39.084763 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:39.084784 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:39.084915 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:39.085034 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
W0217 11:57:39.178061 100380 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0217 11:57:39.178137 100380 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0217 11:57:39.195964 100380 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I0217 11:57:39.196001 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:39.196148 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:39.216666 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0217 11:57:39.226815 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0217 11:57:39.236611 100380 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0217 11:57:39.236669 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0217 11:57:39.246500 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:39.256691 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0217 11:57:39.266509 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:39.276231 100380 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0217 11:57:39.286298 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0217 11:57:39.296149 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0217 11:57:39.305984 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0217 11:57:39.315650 100380 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0217 11:57:39.324721 100380 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0217 11:57:39.324777 100380 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0217 11:57:39.334429 100380 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0217 11:57:39.343052 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:39.458041 100380 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0217 11:57:39.483361 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:39.483453 100380 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0217 11:57:39.501404 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:39.522545 100380 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0217 11:57:39.545214 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:39.557462 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:39.569445 100380 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0217 11:57:39.593668 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:39.606767 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:39.623713 100380 ssh_runner.go:195] Run: which cri-dockerd
I0217 11:57:39.627306 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0217 11:57:39.635920 100380 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0217 11:57:39.651184 100380 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0217 11:57:39.767938 100380 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0217 11:57:39.884761 100380 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0217 11:57:39.884806 100380 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0217 11:57:39.900934 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:40.013206 100380 ssh_runner.go:195] Run: sudo systemctl restart docker
I0217 11:58:41.088581 100380 ssh_runner.go:235] Completed: sudo systemctl restart docker: (1m1.075335279s)
I0217 11:58:41.088680 100380 ssh_runner.go:195] Run: sudo journalctl --no-pager -u docker
I0217 11:58:41.109373 100380 out.go:201]
W0217 11:58:41.110918 100380 out.go:270] X Exiting due to RUNTIME_ENABLE: Failed to enable container runtime: sudo systemctl restart docker: Process exited with status 1
stdout:
stderr:
Job for docker.service failed because the control process exited with error code.
See "systemctl status docker.service" and "journalctl -xeu docker.service" for details.
sudo journalctl --no-pager -u docker:
-- stdout --
Feb 17 11:57:37 ha-783738-m02 systemd[1]: Starting Docker Application Container Engine...
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.207555071Z" level=info msg="Starting up"
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.208523706Z" level=info msg="containerd not running, starting managed containerd"
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.209284365Z" level=info msg="started new containerd process" address=/var/run/docker/containerd/containerd.sock module=libcontainerd pid=499
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.234357473Z" level=info msg="starting containerd" revision=57f17b0a6295a39009d861b89e3b3b87b005ca27 version=v1.7.23
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.253922324Z" level=info msg="loading plugin \"io.containerd.event.v1.exchange\"..." type=io.containerd.event.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254071326Z" level=info msg="loading plugin \"io.containerd.internal.v1.opt\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254155313Z" level=info msg="loading plugin \"io.containerd.warning.v1.deprecations\"..." type=io.containerd.warning.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254195097Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.blockfile\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254502645Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.blockfile\"..." error="no scratch file generator: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254572700Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.btrfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254826671Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.btrfs\"..." error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs (ext4) must be a btrfs filesystem to be used with the btrfs snapshotter: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254880442Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.devmapper\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254926515Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.devmapper\"..." error="devmapper not configured: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254965881Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.native\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.255209553Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.overlayfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.255502921Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.aufs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257578132Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.aufs\"..." error="aufs is not supported (modprobe aufs failed: exit status 1 \"modprobe: FATAL: Module aufs not found in directory /lib/modules/5.10.207\\n\"): skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257723954Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.zfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257912930Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.zfs\"..." error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257960933Z" level=info msg="loading plugin \"io.containerd.content.v1.content\"..." type=io.containerd.content.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.258214223Z" level=info msg="loading plugin \"io.containerd.metadata.v1.bolt\"..." type=io.containerd.metadata.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.258292090Z" level=info msg="metadata content store policy set" policy=shared
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262281766Z" level=info msg="loading plugin \"io.containerd.gc.v1.scheduler\"..." type=io.containerd.gc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262389757Z" level=info msg="loading plugin \"io.containerd.differ.v1.walking\"..." type=io.containerd.differ.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262437193Z" level=info msg="loading plugin \"io.containerd.lease.v1.manager\"..." type=io.containerd.lease.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262478052Z" level=info msg="loading plugin \"io.containerd.streaming.v1.manager\"..." type=io.containerd.streaming.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262523730Z" level=info msg="loading plugin \"io.containerd.runtime.v1.linux\"..." type=io.containerd.runtime.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262614966Z" level=info msg="loading plugin \"io.containerd.monitor.v1.cgroups\"..." type=io.containerd.monitor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262915194Z" level=info msg="loading plugin \"io.containerd.runtime.v2.task\"..." type=io.containerd.runtime.v2
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263049035Z" level=info msg="loading plugin \"io.containerd.runtime.v2.shim\"..." type=io.containerd.runtime.v2
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263094390Z" level=info msg="loading plugin \"io.containerd.sandbox.store.v1.local\"..." type=io.containerd.sandbox.store.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263137669Z" level=info msg="loading plugin \"io.containerd.sandbox.controller.v1.local\"..." type=io.containerd.sandbox.controller.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263176270Z" level=info msg="loading plugin \"io.containerd.service.v1.containers-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263217488Z" level=info msg="loading plugin \"io.containerd.service.v1.content-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263254710Z" level=info msg="loading plugin \"io.containerd.service.v1.diff-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263292496Z" level=info msg="loading plugin \"io.containerd.service.v1.images-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263339613Z" level=info msg="loading plugin \"io.containerd.service.v1.introspection-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263377065Z" level=info msg="loading plugin \"io.containerd.service.v1.namespaces-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263418085Z" level=info msg="loading plugin \"io.containerd.service.v1.snapshots-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263453223Z" level=info msg="loading plugin \"io.containerd.service.v1.tasks-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263511094Z" level=info msg="loading plugin \"io.containerd.grpc.v1.containers\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263549833Z" level=info msg="loading plugin \"io.containerd.grpc.v1.content\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263589341Z" level=info msg="loading plugin \"io.containerd.grpc.v1.diff\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263631649Z" level=info msg="loading plugin \"io.containerd.grpc.v1.events\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263726157Z" level=info msg="loading plugin \"io.containerd.grpc.v1.images\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263766086Z" level=info msg="loading plugin \"io.containerd.grpc.v1.introspection\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263809930Z" level=info msg="loading plugin \"io.containerd.grpc.v1.leases\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263847665Z" level=info msg="loading plugin \"io.containerd.grpc.v1.namespaces\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263885358Z" level=info msg="loading plugin \"io.containerd.grpc.v1.sandbox-controllers\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263932212Z" level=info msg="loading plugin \"io.containerd.grpc.v1.sandboxes\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263972615Z" level=info msg="loading plugin \"io.containerd.grpc.v1.snapshots\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264020660Z" level=info msg="loading plugin \"io.containerd.grpc.v1.streaming\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264063975Z" level=info msg="loading plugin \"io.containerd.grpc.v1.tasks\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264103157Z" level=info msg="loading plugin \"io.containerd.transfer.v1.local\"..." type=io.containerd.transfer.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264158305Z" level=info msg="loading plugin \"io.containerd.grpc.v1.transfer\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264194401Z" level=info msg="loading plugin \"io.containerd.grpc.v1.version\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264230305Z" level=info msg="loading plugin \"io.containerd.internal.v1.restart\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264327104Z" level=info msg="loading plugin \"io.containerd.tracing.processor.v1.otlp\"..." type=io.containerd.tracing.processor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264417123Z" level=info msg="skip loading plugin \"io.containerd.tracing.processor.v1.otlp\"..." error="skip plugin: tracing endpoint not configured" type=io.containerd.tracing.processor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264457690Z" level=info msg="loading plugin \"io.containerd.internal.v1.tracing\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264499822Z" level=info msg="skip loading plugin \"io.containerd.internal.v1.tracing\"..." error="skip plugin: tracing endpoint not configured" type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264534568Z" level=info msg="loading plugin \"io.containerd.grpc.v1.healthcheck\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264575047Z" level=info msg="loading plugin \"io.containerd.nri.v1.nri\"..." type=io.containerd.nri.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264616722Z" level=info msg="NRI interface is disabled by configuration."
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264938960Z" level=info msg=serving... address=/var/run/docker/containerd/containerd-debug.sock
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265032087Z" level=info msg=serving... address=/var/run/docker/containerd/containerd.sock.ttrpc
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265091203Z" level=info msg=serving... address=/var/run/docker/containerd/containerd.sock
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265132167Z" level=info msg="containerd successfully booted in 0.032037s"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.237803305Z" level=info msg="[graphdriver] trying configured driver: overlay2"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.295143778Z" level=info msg="Loading containers: start."
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.484051173Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.565431513Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.632528889Z" level=info msg="Loading containers: done."
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653906274Z" level=warning msg="WARNING: bridge-nf-call-iptables is disabled"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653941707Z" level=warning msg="WARNING: bridge-nf-call-ip6tables is disabled"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653962858Z" level=info msg="Docker daemon" commit=92a8393 containerd-snapshotter=false storage-driver=overlay2 version=27.4.0
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.654196375Z" level=info msg="Daemon has completed initialization"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.676178691Z" level=info msg="API listen on /var/run/docker.sock"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.676315120Z" level=info msg="API listen on [::]:2376"
Feb 17 11:57:38 ha-783738-m02 systemd[1]: Started Docker Application Container Engine.
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.005718953Z" level=info msg="Processing signal 'terminated'"
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007186879Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007378782Z" level=info msg="Daemon shutdown complete"
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007446197Z" level=info msg="stopping healthcheck following graceful shutdown" module=libcontainerd
Feb 17 11:57:40 ha-783738-m02 systemd[1]: Stopping Docker Application Container Engine...
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.008214930Z" level=info msg="stopping event stream following graceful shutdown" error="context canceled" module=libcontainerd namespace=plugins.moby
Feb 17 11:57:41 ha-783738-m02 systemd[1]: docker.service: Deactivated successfully.
Feb 17 11:57:41 ha-783738-m02 systemd[1]: Stopped Docker Application Container Engine.
Feb 17 11:57:41 ha-783738-m02 systemd[1]: Starting Docker Application Container Engine...
Feb 17 11:57:41 ha-783738-m02 dockerd[1120]: time="2025-02-17T11:57:41.051838490Z" level=info msg="Starting up"
Feb 17 11:58:41 ha-783738-m02 dockerd[1120]: failed to start daemon: failed to dial "/run/containerd/containerd.sock": failed to dial "/run/containerd/containerd.sock": context deadline exceeded
Feb 17 11:58:41 ha-783738-m02 systemd[1]: docker.service: Main process exited, code=exited, status=1/FAILURE
Feb 17 11:58:41 ha-783738-m02 systemd[1]: docker.service: Failed with result 'exit-code'.
Feb 17 11:58:41 ha-783738-m02 systemd[1]: Failed to start Docker Application Container Engine.
-- /stdout --
X Exiting due to RUNTIME_ENABLE: Failed to enable container runtime: sudo systemctl restart docker: Process exited with status 1
stdout:
stderr:
Job for docker.service failed because the control process exited with error code.
See "systemctl status docker.service" and "journalctl -xeu docker.service" for details.
sudo journalctl --no-pager -u docker:
-- stdout --
Feb 17 11:57:37 ha-783738-m02 systemd[1]: Starting Docker Application Container Engine...
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.207555071Z" level=info msg="Starting up"
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.208523706Z" level=info msg="containerd not running, starting managed containerd"
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.209284365Z" level=info msg="started new containerd process" address=/var/run/docker/containerd/containerd.sock module=libcontainerd pid=499
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.234357473Z" level=info msg="starting containerd" revision=57f17b0a6295a39009d861b89e3b3b87b005ca27 version=v1.7.23
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.253922324Z" level=info msg="loading plugin \"io.containerd.event.v1.exchange\"..." type=io.containerd.event.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254071326Z" level=info msg="loading plugin \"io.containerd.internal.v1.opt\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254155313Z" level=info msg="loading plugin \"io.containerd.warning.v1.deprecations\"..." type=io.containerd.warning.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254195097Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.blockfile\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254502645Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.blockfile\"..." error="no scratch file generator: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254572700Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.btrfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254826671Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.btrfs\"..." error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs (ext4) must be a btrfs filesystem to be used with the btrfs snapshotter: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254880442Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.devmapper\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254926515Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.devmapper\"..." error="devmapper not configured: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254965881Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.native\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.255209553Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.overlayfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.255502921Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.aufs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257578132Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.aufs\"..." error="aufs is not supported (modprobe aufs failed: exit status 1 \"modprobe: FATAL: Module aufs not found in directory /lib/modules/5.10.207\\n\"): skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257723954Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.zfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257912930Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.zfs\"..." error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257960933Z" level=info msg="loading plugin \"io.containerd.content.v1.content\"..." type=io.containerd.content.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.258214223Z" level=info msg="loading plugin \"io.containerd.metadata.v1.bolt\"..." type=io.containerd.metadata.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.258292090Z" level=info msg="metadata content store policy set" policy=shared
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262281766Z" level=info msg="loading plugin \"io.containerd.gc.v1.scheduler\"..." type=io.containerd.gc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262389757Z" level=info msg="loading plugin \"io.containerd.differ.v1.walking\"..." type=io.containerd.differ.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262437193Z" level=info msg="loading plugin \"io.containerd.lease.v1.manager\"..." type=io.containerd.lease.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262478052Z" level=info msg="loading plugin \"io.containerd.streaming.v1.manager\"..." type=io.containerd.streaming.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262523730Z" level=info msg="loading plugin \"io.containerd.runtime.v1.linux\"..." type=io.containerd.runtime.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262614966Z" level=info msg="loading plugin \"io.containerd.monitor.v1.cgroups\"..." type=io.containerd.monitor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262915194Z" level=info msg="loading plugin \"io.containerd.runtime.v2.task\"..." type=io.containerd.runtime.v2
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263049035Z" level=info msg="loading plugin \"io.containerd.runtime.v2.shim\"..." type=io.containerd.runtime.v2
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263094390Z" level=info msg="loading plugin \"io.containerd.sandbox.store.v1.local\"..." type=io.containerd.sandbox.store.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263137669Z" level=info msg="loading plugin \"io.containerd.sandbox.controller.v1.local\"..." type=io.containerd.sandbox.controller.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263176270Z" level=info msg="loading plugin \"io.containerd.service.v1.containers-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263217488Z" level=info msg="loading plugin \"io.containerd.service.v1.content-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263254710Z" level=info msg="loading plugin \"io.containerd.service.v1.diff-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263292496Z" level=info msg="loading plugin \"io.containerd.service.v1.images-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263339613Z" level=info msg="loading plugin \"io.containerd.service.v1.introspection-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263377065Z" level=info msg="loading plugin \"io.containerd.service.v1.namespaces-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263418085Z" level=info msg="loading plugin \"io.containerd.service.v1.snapshots-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263453223Z" level=info msg="loading plugin \"io.containerd.service.v1.tasks-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263511094Z" level=info msg="loading plugin \"io.containerd.grpc.v1.containers\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263549833Z" level=info msg="loading plugin \"io.containerd.grpc.v1.content\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263589341Z" level=info msg="loading plugin \"io.containerd.grpc.v1.diff\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263631649Z" level=info msg="loading plugin \"io.containerd.grpc.v1.events\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263726157Z" level=info msg="loading plugin \"io.containerd.grpc.v1.images\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263766086Z" level=info msg="loading plugin \"io.containerd.grpc.v1.introspection\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263809930Z" level=info msg="loading plugin \"io.containerd.grpc.v1.leases\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263847665Z" level=info msg="loading plugin \"io.containerd.grpc.v1.namespaces\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263885358Z" level=info msg="loading plugin \"io.containerd.grpc.v1.sandbox-controllers\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263932212Z" level=info msg="loading plugin \"io.containerd.grpc.v1.sandboxes\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263972615Z" level=info msg="loading plugin \"io.containerd.grpc.v1.snapshots\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264020660Z" level=info msg="loading plugin \"io.containerd.grpc.v1.streaming\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264063975Z" level=info msg="loading plugin \"io.containerd.grpc.v1.tasks\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264103157Z" level=info msg="loading plugin \"io.containerd.transfer.v1.local\"..." type=io.containerd.transfer.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264158305Z" level=info msg="loading plugin \"io.containerd.grpc.v1.transfer\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264194401Z" level=info msg="loading plugin \"io.containerd.grpc.v1.version\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264230305Z" level=info msg="loading plugin \"io.containerd.internal.v1.restart\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264327104Z" level=info msg="loading plugin \"io.containerd.tracing.processor.v1.otlp\"..." type=io.containerd.tracing.processor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264417123Z" level=info msg="skip loading plugin \"io.containerd.tracing.processor.v1.otlp\"..." error="skip plugin: tracing endpoint not configured" type=io.containerd.tracing.processor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264457690Z" level=info msg="loading plugin \"io.containerd.internal.v1.tracing\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264499822Z" level=info msg="skip loading plugin \"io.containerd.internal.v1.tracing\"..." error="skip plugin: tracing endpoint not configured" type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264534568Z" level=info msg="loading plugin \"io.containerd.grpc.v1.healthcheck\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264575047Z" level=info msg="loading plugin \"io.containerd.nri.v1.nri\"..." type=io.containerd.nri.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264616722Z" level=info msg="NRI interface is disabled by configuration."
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264938960Z" level=info msg=serving... address=/var/run/docker/containerd/containerd-debug.sock
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265032087Z" level=info msg=serving... address=/var/run/docker/containerd/containerd.sock.ttrpc
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265091203Z" level=info msg=serving... address=/var/run/docker/containerd/containerd.sock
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265132167Z" level=info msg="containerd successfully booted in 0.032037s"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.237803305Z" level=info msg="[graphdriver] trying configured driver: overlay2"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.295143778Z" level=info msg="Loading containers: start."
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.484051173Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.565431513Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.632528889Z" level=info msg="Loading containers: done."
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653906274Z" level=warning msg="WARNING: bridge-nf-call-iptables is disabled"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653941707Z" level=warning msg="WARNING: bridge-nf-call-ip6tables is disabled"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653962858Z" level=info msg="Docker daemon" commit=92a8393 containerd-snapshotter=false storage-driver=overlay2 version=27.4.0
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.654196375Z" level=info msg="Daemon has completed initialization"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.676178691Z" level=info msg="API listen on /var/run/docker.sock"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.676315120Z" level=info msg="API listen on [::]:2376"
Feb 17 11:57:38 ha-783738-m02 systemd[1]: Started Docker Application Container Engine.
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.005718953Z" level=info msg="Processing signal 'terminated'"
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007186879Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007378782Z" level=info msg="Daemon shutdown complete"
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007446197Z" level=info msg="stopping healthcheck following graceful shutdown" module=libcontainerd
Feb 17 11:57:40 ha-783738-m02 systemd[1]: Stopping Docker Application Container Engine...
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.008214930Z" level=info msg="stopping event stream following graceful shutdown" error="context canceled" module=libcontainerd namespace=plugins.moby
Feb 17 11:57:41 ha-783738-m02 systemd[1]: docker.service: Deactivated successfully.
Feb 17 11:57:41 ha-783738-m02 systemd[1]: Stopped Docker Application Container Engine.
Feb 17 11:57:41 ha-783738-m02 systemd[1]: Starting Docker Application Container Engine...
Feb 17 11:57:41 ha-783738-m02 dockerd[1120]: time="2025-02-17T11:57:41.051838490Z" level=info msg="Starting up"
Feb 17 11:58:41 ha-783738-m02 dockerd[1120]: failed to start daemon: failed to dial "/run/containerd/containerd.sock": failed to dial "/run/containerd/containerd.sock": context deadline exceeded
Feb 17 11:58:41 ha-783738-m02 systemd[1]: docker.service: Main process exited, code=exited, status=1/FAILURE
Feb 17 11:58:41 ha-783738-m02 systemd[1]: docker.service: Failed with result 'exit-code'.
Feb 17 11:58:41 ha-783738-m02 systemd[1]: Failed to start Docker Application Container Engine.
-- /stdout --
W0217 11:58:41.110964 100380 out.go:270] *
*
W0217 11:58:41.111815 100380 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0217 11:58:41.113412 100380 out.go:201]
** /stderr **
ha_test.go:564: failed to start cluster. args "out/minikube-linux-amd64 start -p ha-783738 --wait=true -v=7 --alsologtostderr --driver=kvm2 " : exit status 90
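The journal output above pins the failure down: the restarted dockerd (pid 1120) on ha-783738-m02 times out dialing /run/containerd/containerd.sock, whereas the previous dockerd (pid 493) had launched its own managed containerd on /var/run/docker/containerd/containerd.sock. A minimal manual check on the affected node would look like the sketch below; it assumes SSH access to ha-783738-m02 and that the node runs containerd as its own systemd unit, which this log does not confirm.

# Is the containerd unit active, and does the socket dockerd is dialing actually exist?
sudo systemctl status containerd --no-pager
ls -l /run/containerd/containerd.sock
# The containerd journal usually says why the socket never came up.
sudo journalctl -u containerd --no-pager -n 50
# If containerd is merely stopped, starting it first normally lets docker.service restart cleanly.
sudo systemctl restart containerd && sudo systemctl restart docker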
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p ha-783738 -n ha-783738
helpers_test.go:239: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p ha-783738 -n ha-783738: exit status 2 (235.921283ms)
-- stdout --
Running
-- /stdout --
helpers_test.go:239: status error: exit status 2 (may be ok)
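The {{.Host}} probe above reports only the primary VM's host state, and the helper notes that a non-zero status "may be ok" after a failed start. For a fuller picture, the same binary can print host, kubelet, and apiserver state for every node in the profile; a sketch, assuming the out/ binary and the ha-783738 profile from this run are still on disk:

# Per-node status for the whole ha-783738 profile, not just the primary host field.
out/minikube-linux-amd64 status -p ha-783738 --alsologtostderr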
helpers_test.go:244: <<< TestMultiControlPlane/serial/RestartCluster FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestMultiControlPlane/serial/RestartCluster]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p ha-783738 logs -n 25
helpers_test.go:252: TestMultiControlPlane/serial/RestartCluster logs:
-- stdout --
==> Audit <==
|---------|----------------------------------------------------------------------------------|-----------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|----------------------------------------------------------------------------------|-----------|---------|---------|---------------------|---------------------|
| cp | ha-783738 cp ha-783738-m03:/home/docker/cp-test.txt | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:50 UTC | 17 Feb 25 11:50 UTC |
| | ha-783738-m04:/home/docker/cp-test_ha-783738-m03_ha-783738-m04.txt | | | | | |
| ssh | ha-783738 ssh -n | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:50 UTC | 17 Feb 25 11:50 UTC |
| | ha-783738-m03 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-783738 ssh -n ha-783738-m04 sudo cat | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:50 UTC | 17 Feb 25 11:50 UTC |
| | /home/docker/cp-test_ha-783738-m03_ha-783738-m04.txt | | | | | |
| cp | ha-783738 cp testdata/cp-test.txt | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:50 UTC | 17 Feb 25 11:50 UTC |
| | ha-783738-m04:/home/docker/cp-test.txt | | | | | |
| ssh | ha-783738 ssh -n | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:50 UTC | 17 Feb 25 11:50 UTC |
| | ha-783738-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| cp | ha-783738 cp ha-783738-m04:/home/docker/cp-test.txt | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:50 UTC | 17 Feb 25 11:51 UTC |
| | /tmp/TestMultiControlPlaneserialCopyFile3703533036/001/cp-test_ha-783738-m04.txt | | | | | |
| ssh | ha-783738 ssh -n | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| cp | ha-783738 cp ha-783738-m04:/home/docker/cp-test.txt | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738:/home/docker/cp-test_ha-783738-m04_ha-783738.txt | | | | | |
| ssh | ha-783738 ssh -n | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-783738 ssh -n ha-783738 sudo cat | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | /home/docker/cp-test_ha-783738-m04_ha-783738.txt | | | | | |
| cp | ha-783738 cp ha-783738-m04:/home/docker/cp-test.txt | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738-m02:/home/docker/cp-test_ha-783738-m04_ha-783738-m02.txt | | | | | |
| ssh | ha-783738 ssh -n | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-783738 ssh -n ha-783738-m02 sudo cat | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | /home/docker/cp-test_ha-783738-m04_ha-783738-m02.txt | | | | | |
| cp | ha-783738 cp ha-783738-m04:/home/docker/cp-test.txt | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738-m03:/home/docker/cp-test_ha-783738-m04_ha-783738-m03.txt | | | | | |
| ssh | ha-783738 ssh -n | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | ha-783738-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-783738 ssh -n ha-783738-m03 sudo cat | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | /home/docker/cp-test_ha-783738-m04_ha-783738-m03.txt | | | | | |
| node | ha-783738 node stop m02 -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | --alsologtostderr | | | | | |
| node | ha-783738 node start m02 -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:51 UTC | 17 Feb 25 11:51 UTC |
| | --alsologtostderr | | | | | |
| node | list -p ha-783738 -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:52 UTC | |
| | --alsologtostderr | | | | | |
| stop | -p ha-783738 -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:52 UTC | 17 Feb 25 11:52 UTC |
| | --alsologtostderr | | | | | |
| start | -p ha-783738 --wait=true -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:52 UTC | 17 Feb 25 11:56 UTC |
| | --alsologtostderr | | | | | |
| node | list -p ha-783738 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:56 UTC | |
| node | ha-783738 node delete m03 -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:56 UTC | 17 Feb 25 11:56 UTC |
| | --alsologtostderr | | | | | |
| stop | ha-783738 stop -v=7 | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:56 UTC | 17 Feb 25 11:56 UTC |
| | --alsologtostderr | | | | | |
| start | -p ha-783738 --wait=true | ha-783738 | jenkins | v1.35.0 | 17 Feb 25 11:56 UTC | |
| | -v=7 --alsologtostderr | | | | | |
| | --driver=kvm2 | | | | | |
|---------|----------------------------------------------------------------------------------|-----------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2025/02/17 11:56:50
Running on machine: ubuntu-20-agent-7
Binary: Built with gc go1.23.4 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0217 11:56:50.215291 100380 out.go:345] Setting OutFile to fd 1 ...
I0217 11:56:50.215609 100380 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0217 11:56:50.215619 100380 out.go:358] Setting ErrFile to fd 2...
I0217 11:56:50.215624 100380 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0217 11:56:50.215819 100380 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20427-77349/.minikube/bin
I0217 11:56:50.216353 100380 out.go:352] Setting JSON to false
I0217 11:56:50.217237 100380 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-7","uptime":5958,"bootTime":1739787452,"procs":182,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1075-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0217 11:56:50.217362 100380 start.go:139] virtualization: kvm guest
I0217 11:56:50.219910 100380 out.go:177] * [ha-783738] minikube v1.35.0 on Ubuntu 20.04 (kvm/amd64)
I0217 11:56:50.221323 100380 out.go:177] - MINIKUBE_LOCATION=20427
I0217 11:56:50.221334 100380 notify.go:220] Checking for updates...
I0217 11:56:50.223835 100380 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0217 11:56:50.224954 100380 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:56:50.226180 100380 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20427-77349/.minikube
I0217 11:56:50.227361 100380 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0217 11:56:50.228473 100380 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0217 11:56:50.229885 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:56:50.230261 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:56:50.230308 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:56:50.245239 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:46091
I0217 11:56:50.245761 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:56:50.246359 100380 main.go:141] libmachine: Using API Version 1
I0217 11:56:50.246382 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:56:50.246775 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:56:50.246962 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:56:50.247230 100380 driver.go:394] Setting default libvirt URI to qemu:///system
I0217 11:56:50.247538 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:56:50.247594 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:56:50.262713 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:36011
I0217 11:56:50.263097 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:56:50.263692 100380 main.go:141] libmachine: Using API Version 1
I0217 11:56:50.263752 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:56:50.264059 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:56:50.264289 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:56:50.297981 100380 out.go:177] * Using the kvm2 driver based on existing profile
I0217 11:56:50.299143 100380 start.go:297] selected driver: kvm2
I0217 11:56:50.299155 100380 start.go:901] validating driver "kvm2" against &{Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime: ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0217 11:56:50.299304 100380 start.go:912] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0217 11:56:50.299646 100380 install.go:52] acquiring lock: {Name:mk900956b073697a4aa6c80a27c6bb0742a99a53 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0217 11:56:50.299706 100380 install.go:117] Validating docker-machine-driver-kvm2, PATH=/home/jenkins/minikube-integration/20427-77349/.minikube/bin:/home/jenkins/workspace/KVM_Linux_integration/out/:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/jenkins/go/bin:/usr/local/bin/:/usr/local/go/bin/:/home/jenkins/go/bin
I0217 11:56:50.314229 100380 install.go:137] /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2 version is 1.35.0
I0217 11:56:50.314917 100380 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0217 11:56:50.314949 100380 cni.go:84] Creating CNI manager for ""
I0217 11:56:50.315000 100380 cni.go:136] multinode detected (3 nodes found), recommending kindnet
I0217 11:56:50.315060 100380 start.go:340] cluster config:
{Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0217 11:56:50.315190 100380 iso.go:125] acquiring lock: {Name:mk4380b7bda8fcd8bced9705ff1695c3fb7dac0d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0217 11:56:50.317519 100380 out.go:177] * Starting "ha-783738" primary control-plane node in "ha-783738" cluster
I0217 11:56:50.318547 100380 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0217 11:56:50.318578 100380 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20427-77349/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4
I0217 11:56:50.318588 100380 cache.go:56] Caching tarball of preloaded images
I0217 11:56:50.318681 100380 preload.go:172] Found /home/jenkins/minikube-integration/20427-77349/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0217 11:56:50.318695 100380 cache.go:59] Finished verifying existence of preloaded tar for v1.32.1 on docker
I0217 11:56:50.318829 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:56:50.319009 100380 start.go:360] acquireMachinesLock for ha-783738: {Name:mk05ba8323ae77ab7dcc14c378d65810d956fdc0 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I0217 11:56:50.319055 100380 start.go:364] duration metric: took 23.519µs to acquireMachinesLock for "ha-783738"
I0217 11:56:50.319080 100380 start.go:96] Skipping create...Using existing machine configuration
I0217 11:56:50.319088 100380 fix.go:54] fixHost starting:
I0217 11:56:50.319353 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:56:50.319391 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:56:50.333761 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:34803
I0217 11:56:50.334152 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:56:50.334693 100380 main.go:141] libmachine: Using API Version 1
I0217 11:56:50.334714 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:56:50.335000 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:56:50.335210 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:56:50.335347 100380 main.go:141] libmachine: (ha-783738) Calling .GetState
I0217 11:56:50.336730 100380 fix.go:112] recreateIfNeeded on ha-783738: state=Stopped err=<nil>
I0217 11:56:50.336752 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
W0217 11:56:50.336864 100380 fix.go:138] unexpected machine state, will restart: <nil>
I0217 11:56:50.338814 100380 out.go:177] * Restarting existing kvm2 VM for "ha-783738" ...
I0217 11:56:50.340020 100380 main.go:141] libmachine: (ha-783738) Calling .Start
I0217 11:56:50.340200 100380 main.go:141] libmachine: (ha-783738) starting domain...
I0217 11:56:50.340221 100380 main.go:141] libmachine: (ha-783738) ensuring networks are active...
I0217 11:56:50.340845 100380 main.go:141] libmachine: (ha-783738) Ensuring network default is active
I0217 11:56:50.341268 100380 main.go:141] libmachine: (ha-783738) Ensuring network mk-ha-783738 is active
I0217 11:56:50.341612 100380 main.go:141] libmachine: (ha-783738) getting domain XML...
I0217 11:56:50.342286 100380 main.go:141] libmachine: (ha-783738) creating domain...
I0217 11:56:51.533335 100380 main.go:141] libmachine: (ha-783738) waiting for IP...
I0217 11:56:51.534198 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:51.534571 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:51.534631 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:51.534554 100416 retry.go:31] will retry after 214.112758ms: waiting for domain to come up
I0217 11:56:51.750038 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:51.750535 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:51.750587 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:51.750528 100416 retry.go:31] will retry after 287.575076ms: waiting for domain to come up
I0217 11:56:52.040019 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:52.040473 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:52.040515 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:52.040452 100416 retry.go:31] will retry after 303.389275ms: waiting for domain to come up
I0217 11:56:52.345057 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:52.345400 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:52.345452 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:52.345383 100416 retry.go:31] will retry after 580.610288ms: waiting for domain to come up
I0217 11:56:52.927102 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:52.927623 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:52.927663 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:52.927596 100416 retry.go:31] will retry after 470.88869ms: waiting for domain to come up
I0217 11:56:53.400293 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:53.400698 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:53.400725 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:53.400636 100416 retry.go:31] will retry after 645.102407ms: waiting for domain to come up
I0217 11:56:54.046798 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:54.047309 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:54.047365 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:54.047265 100416 retry.go:31] will retry after 993.016218ms: waiting for domain to come up
I0217 11:56:55.041450 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:55.041808 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:55.041828 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:55.041790 100416 retry.go:31] will retry after 1.096274529s: waiting for domain to come up
I0217 11:56:56.139475 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:56.139892 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:56.139957 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:56.139882 100416 retry.go:31] will retry after 1.840421804s: waiting for domain to come up
I0217 11:56:57.981618 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:57.982040 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:57.982068 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:57.981979 100416 retry.go:31] will retry after 1.8969141s: waiting for domain to come up
I0217 11:56:59.881026 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:56:59.881535 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:56:59.881570 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:56:59.881471 100416 retry.go:31] will retry after 1.890240518s: waiting for domain to come up
I0217 11:57:01.773274 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:01.773728 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:57:01.773779 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:57:01.773696 100416 retry.go:31] will retry after 3.046762911s: waiting for domain to come up
I0217 11:57:04.823999 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:04.824458 100380 main.go:141] libmachine: (ha-783738) DBG | unable to find current IP address of domain ha-783738 in network mk-ha-783738
I0217 11:57:04.824497 100380 main.go:141] libmachine: (ha-783738) DBG | I0217 11:57:04.824453 100416 retry.go:31] will retry after 3.819063496s: waiting for domain to come up
I0217 11:57:08.647831 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.648309 100380 main.go:141] libmachine: (ha-783738) found domain IP: 192.168.39.249
I0217 11:57:08.648334 100380 main.go:141] libmachine: (ha-783738) reserving static IP address...
I0217 11:57:08.648347 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has current primary IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.648799 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "ha-783738", mac: "52:54:00:fb:6f:65", ip: "192.168.39.249"} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.648824 100380 main.go:141] libmachine: (ha-783738) DBG | skip adding static IP to network mk-ha-783738 - found existing host DHCP lease matching {name: "ha-783738", mac: "52:54:00:fb:6f:65", ip: "192.168.39.249"}
I0217 11:57:08.648835 100380 main.go:141] libmachine: (ha-783738) reserved static IP address 192.168.39.249 for domain ha-783738
I0217 11:57:08.648846 100380 main.go:141] libmachine: (ha-783738) waiting for SSH...
I0217 11:57:08.648862 100380 main.go:141] libmachine: (ha-783738) DBG | Getting to WaitForSSH function...
I0217 11:57:08.650828 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.651193 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.651224 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.651387 100380 main.go:141] libmachine: (ha-783738) DBG | Using SSH client type: external
I0217 11:57:08.651414 100380 main.go:141] libmachine: (ha-783738) DBG | Using SSH private key: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa (-rw-------)
I0217 11:57:08.651435 100380 main.go:141] libmachine: (ha-783738) DBG | &{[-F /dev/null -o ConnectionAttempts=3 -o ConnectTimeout=10 -o ControlMaster=no -o ControlPath=none -o LogLevel=quiet -o PasswordAuthentication=no -o ServerAliveInterval=60 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null docker@192.168.39.249 -o IdentitiesOnly=yes -i /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa -p 22] /usr/bin/ssh <nil>}
I0217 11:57:08.651464 100380 main.go:141] libmachine: (ha-783738) DBG | About to run SSH command:
I0217 11:57:08.651480 100380 main.go:141] libmachine: (ha-783738) DBG | exit 0
I0217 11:57:08.776922 100380 main.go:141] libmachine: (ha-783738) DBG | SSH cmd err, output: <nil>:
I0217 11:57:08.777326 100380 main.go:141] libmachine: (ha-783738) Calling .GetConfigRaw
I0217 11:57:08.777959 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:08.780301 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.780692 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.780735 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.780948 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:08.781137 100380 machine.go:93] provisionDockerMachine start ...
I0217 11:57:08.781154 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:08.781442 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:08.783478 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.783868 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.783897 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.784048 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:08.784237 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.784393 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.784570 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:08.784738 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:08.784917 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:08.784928 100380 main.go:141] libmachine: About to run SSH command:
hostname
I0217 11:57:08.889484 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: minikube
I0217 11:57:08.889525 100380 main.go:141] libmachine: (ha-783738) Calling .GetMachineName
I0217 11:57:08.889783 100380 buildroot.go:166] provisioning hostname "ha-783738"
I0217 11:57:08.889818 100380 main.go:141] libmachine: (ha-783738) Calling .GetMachineName
I0217 11:57:08.890003 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:08.892666 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.893027 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:08.893060 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:08.893202 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:08.893391 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.893536 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:08.893661 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:08.893787 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:08.893949 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:08.893960 100380 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-783738 && echo "ha-783738" | sudo tee /etc/hostname
I0217 11:57:09.014626 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-783738
I0217 11:57:09.014653 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.017274 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.017710 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.017744 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.017939 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.018131 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.018348 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.018473 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.018701 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.018967 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.018994 100380 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-783738' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-783738/g' /etc/hosts;
else
echo '127.0.1.1 ha-783738' | sudo tee -a /etc/hosts;
fi
fi
I0217 11:57:09.133208 100380 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0217 11:57:09.133247 100380 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/20427-77349/.minikube CaCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20427-77349/.minikube}
I0217 11:57:09.133278 100380 buildroot.go:174] setting up certificates
I0217 11:57:09.133295 100380 provision.go:84] configureAuth start
I0217 11:57:09.133331 100380 main.go:141] libmachine: (ha-783738) Calling .GetMachineName
I0217 11:57:09.133680 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:09.136393 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.136746 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.136771 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.136918 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.139192 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.139545 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.139583 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.139699 100380 provision.go:143] copyHostCerts
I0217 11:57:09.139734 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:09.139786 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem, removing ...
I0217 11:57:09.139804 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:09.139883 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem (1082 bytes)
I0217 11:57:09.139996 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:09.140023 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem, removing ...
I0217 11:57:09.140030 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:09.140079 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem (1123 bytes)
I0217 11:57:09.140159 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:09.140184 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem, removing ...
I0217 11:57:09.140191 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:09.140228 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem (1675 bytes)
I0217 11:57:09.140314 100380 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem org=jenkins.ha-783738 san=[127.0.0.1 192.168.39.249 ha-783738 localhost minikube]
I0217 11:57:09.269804 100380 provision.go:177] copyRemoteCerts
I0217 11:57:09.269900 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0217 11:57:09.269935 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.272592 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.272916 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.272945 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.273095 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.273282 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.273464 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.273600 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:09.355256 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0217 11:57:09.355331 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0217 11:57:09.378132 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem -> /etc/docker/server.pem
I0217 11:57:09.378243 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0217 11:57:09.399749 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0217 11:57:09.399830 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0217 11:57:09.421183 100380 provision.go:87] duration metric: took 287.855291ms to configureAuth
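The configureAuth phase that just finished rebuilds the Docker TLS material for the VM: it refreshes ca.pem, cert.pem and key.pem under .minikube, generates a server certificate whose SANs cover 127.0.0.1, 192.168.39.249, ha-783738, localhost and minikube, and copies the results into /etc/docker on the guest. As a hedged sketch run on the host (paths are the ones logged above), the freshly written server.pem can be inspected with openssl:

    # inspect the server certificate generated by configureAuth
    CERT=/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem
    openssl x509 -in "$CERT" -noout -subject -issuer -dates
    # confirm the SAN list matches what provision.go logged
    openssl x509 -in "$CERT" -noout -text | grep -A1 'Subject Alternative Name'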
I0217 11:57:09.421207 100380 buildroot.go:189] setting minikube options for container-runtime
I0217 11:57:09.421432 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:09.421460 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:09.421765 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.424701 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.425141 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.425173 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.425370 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.425557 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.425734 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.425883 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.426059 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.426283 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.426297 100380 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0217 11:57:09.534976 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: tmpfs
I0217 11:57:09.535006 100380 buildroot.go:70] root file system type: tmpfs
I0217 11:57:09.535125 100380 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0217 11:57:09.535163 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.537739 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.538108 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.538126 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.538307 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.538481 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.538662 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.538802 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.538949 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.539142 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.539243 100380 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0217 11:57:09.658326 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0217 11:57:09.658371 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:09.661372 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.661838 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:09.661875 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:09.662085 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:09.662300 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.662435 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:09.662559 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:09.662707 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:09.662897 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:09.662913 100380 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0217 11:57:11.588699 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
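The unit text written above also documents why its first ExecStart= line is empty: systemd rejects a service (other than Type=oneshot) that ends up with more than one ExecStart=, so any redefinition has to clear the inherited command before supplying a new one. A minimal, purely illustrative sketch of the same pattern as a drop-in (minikube itself writes the full unit shown above, not this file):

    # hypothetical override.conf demonstrating the "clear, then redefine" ExecStart rule
    sudo mkdir -p /etc/systemd/system/docker.service.d
    printf '[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock\n' \
      | sudo tee /etc/systemd/system/docker.service.d/override.conf
    sudo systemctl daemon-reload && sudo systemctl restart docker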
I0217 11:57:11.588766 100380 machine.go:96] duration metric: took 2.807616414s to provisionDockerMachine
I0217 11:57:11.588782 100380 start.go:293] postStartSetup for "ha-783738" (driver="kvm2")
I0217 11:57:11.588792 100380 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0217 11:57:11.588810 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.589177 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0217 11:57:11.589221 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.592192 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.592596 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.592627 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.592785 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.592979 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.593170 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.593334 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:11.675232 100380 ssh_runner.go:195] Run: cat /etc/os-release
I0217 11:57:11.679319 100380 info.go:137] Remote host: Buildroot 2023.02.9
I0217 11:57:11.679347 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/addons for local assets ...
I0217 11:57:11.679434 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/files for local assets ...
I0217 11:57:11.679553 100380 filesync.go:149] local asset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> 845022.pem in /etc/ssl/certs
I0217 11:57:11.679569 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> /etc/ssl/certs/845022.pem
I0217 11:57:11.679700 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0217 11:57:11.688596 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem --> /etc/ssl/certs/845022.pem (1708 bytes)
I0217 11:57:11.712948 100380 start.go:296] duration metric: took 124.147315ms for postStartSetup
I0217 11:57:11.713041 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.713388 100380 ssh_runner.go:195] Run: sudo ls --almost-all -1 /var/lib/minikube/backup
I0217 11:57:11.713431 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.716109 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.716482 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.716509 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.716697 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.716902 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.717111 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.717237 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:11.799568 100380 machine.go:197] restoring vm config from /var/lib/minikube/backup: [etc]
I0217 11:57:11.799647 100380 ssh_runner.go:195] Run: sudo rsync --archive --update /var/lib/minikube/backup/etc /
I0217 11:57:11.840659 100380 fix.go:56] duration metric: took 21.521561421s for fixHost
I0217 11:57:11.840710 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.843711 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.844159 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.844211 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.844334 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.844538 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.844685 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.844877 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.845064 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:11.845292 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.249 22 <nil> <nil>}
I0217 11:57:11.845324 100380 main.go:141] libmachine: About to run SSH command:
date +%s.%N
I0217 11:57:11.961693 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: 1739793431.919777749
I0217 11:57:11.961720 100380 fix.go:216] guest clock: 1739793431.919777749
I0217 11:57:11.961728 100380 fix.go:229] Guest: 2025-02-17 11:57:11.919777749 +0000 UTC Remote: 2025-02-17 11:57:11.840688548 +0000 UTC m=+21.663425668 (delta=79.089201ms)
I0217 11:57:11.961764 100380 fix.go:200] guest clock delta is within tolerance: 79.089201ms
I0217 11:57:11.961771 100380 start.go:83] releasing machines lock for "ha-783738", held for 21.642703542s
I0217 11:57:11.961797 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.962076 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:11.964739 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.965072 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.965098 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.965245 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.965780 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.965938 100380 main.go:141] libmachine: (ha-783738) Calling .DriverName
I0217 11:57:11.966020 100380 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0217 11:57:11.966085 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.966153 100380 ssh_runner.go:195] Run: cat /version.json
I0217 11:57:11.966182 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHHostname
I0217 11:57:11.968710 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.968814 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.969180 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.969211 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:11.969228 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.969243 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:11.969400 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.969505 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHPort
I0217 11:57:11.969573 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.969654 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHKeyPath
I0217 11:57:11.969705 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.969780 100380 main.go:141] libmachine: (ha-783738) Calling .GetSSHUsername
I0217 11:57:11.969855 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:11.969896 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.249 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738/id_rsa Username:docker}
I0217 11:57:12.070993 100380 ssh_runner.go:195] Run: systemctl --version
I0217 11:57:12.076962 100380 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W0217 11:57:12.082069 100380 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0217 11:57:12.082164 100380 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0217 11:57:12.097308 100380 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I0217 11:57:12.097353 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:12.097502 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:12.116857 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0217 11:57:12.128177 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0217 11:57:12.139383 100380 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0217 11:57:12.139433 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0217 11:57:12.150535 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:12.161824 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0217 11:57:12.173075 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:12.184735 100380 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0217 11:57:12.196065 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0217 11:57:12.206061 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0217 11:57:12.215826 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0217 11:57:12.225719 100380 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0217 11:57:12.234589 100380 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0217 11:57:12.234644 100380 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0217 11:57:12.244581 100380 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
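The sysctl failure a few lines up is expected on a fresh boot: /proc/sys/net/bridge/ only exists once br_netfilter is loaded, which is why the tool immediately falls back to modprobe and then switches on IPv4 forwarding. A hedged sketch for confirming the resulting state inside the guest (same keys as above):

    lsmod | grep br_netfilter                    # module should now be loaded
    sysctl net.bridge.bridge-nf-call-iptables    # needed at 1 so bridged pod traffic hits iptables
    sysctl net.ipv4.ip_forward                   # set to 1 by the echo above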
I0217 11:57:12.253602 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:12.359116 100380 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0217 11:57:12.382906 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:12.383010 100380 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0217 11:57:12.408300 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:12.424027 100380 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0217 11:57:12.444833 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:12.457628 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:12.470140 100380 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0217 11:57:12.497764 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:12.511071 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:12.529141 100380 ssh_runner.go:195] Run: which cri-dockerd
I0217 11:57:12.532846 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0217 11:57:12.541895 100380 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0217 11:57:12.557198 100380 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0217 11:57:12.670128 100380 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0217 11:57:12.796263 100380 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0217 11:57:12.796399 100380 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0217 11:57:12.812229 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:12.923350 100380 ssh_runner.go:195] Run: sudo systemctl restart docker
I0217 11:57:15.351609 100380 ssh_runner.go:235] Completed: sudo systemctl restart docker: (2.428206669s)
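With /etc/docker/daemon.json rewritten (the 130-byte payload itself is not shown in the log) and the daemon restarted, Docker should now report the cgroupfs driver that docker.go selected. The same check the tool performs a few seconds later can be run by hand; a hedged sketch:

    sudo systemctl is-active docker
    docker info --format '{{.CgroupDriver}}'     # expected: cgroupfs, matching the kubelet config rendered below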
I0217 11:57:15.351699 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0217 11:57:15.364852 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0217 11:57:15.377423 100380 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0217 11:57:15.493635 100380 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0217 11:57:15.621524 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:15.730858 100380 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0217 11:57:15.748138 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0217 11:57:15.761818 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:15.881775 100380 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0217 11:57:15.960772 100380 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0217 11:57:15.960858 100380 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0217 11:57:15.966411 100380 start.go:563] Will wait 60s for crictl version
I0217 11:57:15.966517 100380 ssh_runner.go:195] Run: which crictl
I0217 11:57:15.974036 100380 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0217 11:57:16.011837 100380 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.4.0
RuntimeApiVersion: v1
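The crictl probe above only works because /etc/crictl.yaml was just pointed at cri-dockerd, which is how a Docker engine ends up answering CRI version queries. The same socket can be queried explicitly; a hedged sketch using the endpoint configured above:

    sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version
    sudo crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock info   # runtime status, CNI config state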
I0217 11:57:16.011912 100380 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0217 11:57:16.036945 100380 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0217 11:57:16.060974 100380 out.go:235] * Preparing Kubernetes v1.32.1 on Docker 27.4.0 ...
I0217 11:57:16.061031 100380 main.go:141] libmachine: (ha-783738) Calling .GetIP
I0217 11:57:16.063810 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:16.064255 100380 main.go:141] libmachine: (ha-783738) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:fb:6f:65", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:57:01 +0000 UTC Type:0 Mac:52:54:00:fb:6f:65 Iaid: IPaddr:192.168.39.249 Prefix:24 Hostname:ha-783738 Clientid:01:52:54:00:fb:6f:65}
I0217 11:57:16.064298 100380 main.go:141] libmachine: (ha-783738) DBG | domain ha-783738 has defined IP address 192.168.39.249 and MAC address 52:54:00:fb:6f:65 in network mk-ha-783738
I0217 11:57:16.064499 100380 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I0217 11:57:16.068464 100380 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.39.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0217 11:57:16.080668 100380 kubeadm.go:883] updating cluster {Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:
default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-
gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizatio
ns:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0217 11:57:16.080804 100380 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0217 11:57:16.080849 100380 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0217 11:57:16.098890 100380 docker.go:689] Got preloaded images: -- stdout --
kindest/kindnetd:v20250214-acbabc1a
registry.k8s.io/kube-apiserver:v1.32.1
registry.k8s.io/kube-scheduler:v1.32.1
registry.k8s.io/kube-controller-manager:v1.32.1
registry.k8s.io/kube-proxy:v1.32.1
ghcr.io/kube-vip/kube-vip:v0.8.9
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28
-- /stdout --
I0217 11:57:16.098911 100380 docker.go:619] Images already preloaded, skipping extraction
I0217 11:57:16.098974 100380 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0217 11:57:16.116506 100380 docker.go:689] Got preloaded images: -- stdout --
kindest/kindnetd:v20250214-acbabc1a
registry.k8s.io/kube-apiserver:v1.32.1
registry.k8s.io/kube-scheduler:v1.32.1
registry.k8s.io/kube-controller-manager:v1.32.1
registry.k8s.io/kube-proxy:v1.32.1
ghcr.io/kube-vip/kube-vip:v0.8.9
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28
-- /stdout --
I0217 11:57:16.116540 100380 cache_images.go:84] Images are preloaded, skipping loading
I0217 11:57:16.116556 100380 kubeadm.go:934] updating node { 192.168.39.249 8443 v1.32.1 docker true true} ...
I0217 11:57:16.116703 100380 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-783738 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.249
[Install]
config:
{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
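The kubelet drop-in above pins only the node name, node IP and kubeconfig paths and delegates everything else to /var/lib/kubelet/config.yaml; a few lines below it is uploaded as /etc/systemd/system/kubelet.service.d/10-kubeadm.conf. Once in place, the merged unit can be reviewed with systemd itself; a hedged sketch:

    sudo systemctl cat kubelet                   # base unit plus the 10-kubeadm.conf ExecStart override
    sudo systemctl status kubelet --no-pager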
I0217 11:57:16.116764 100380 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0217 11:57:16.164431 100380 cni.go:84] Creating CNI manager for ""
I0217 11:57:16.164455 100380 cni.go:136] multinode detected (3 nodes found), recommending kindnet
I0217 11:57:16.164469 100380 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0217 11:57:16.164499 100380 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.39.249 APIServerPort:8443 KubernetesVersion:v1.32.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-783738 NodeName:ha-783738 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.249"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.249 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuberne
tes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0217 11:57:16.164682 100380 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.249
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "ha-783738"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.39.249"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.249"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.32.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
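All of the documents above, from InitConfiguration through KubeProxyConfiguration, are rendered into a single multi-document YAML that the scp below places at /var/tmp/minikube/kubeadm.yaml.new. If that rendered file ever needs to be sanity-checked by hand, kubeadm can parse and render it without applying anything; a hedged sketch using the binary path from this run:

    sudo /var/lib/minikube/binaries/v1.32.1/kubeadm init \
      --config /var/tmp/minikube/kubeadm.yaml.new --dry-run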
I0217 11:57:16.164704 100380 kube-vip.go:115] generating kube-vip config ...
I0217 11:57:16.164766 100380 ssh_runner.go:195] Run: sudo sh -c "modprobe --all ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack"
I0217 11:57:16.178981 100380 kube-vip.go:167] auto-enabling control-plane load-balancing in kube-vip
I0217 11:57:16.179102 100380 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.39.254
- name: prometheus_server
value: :2112
- name : lb_enable
value: "true"
- name: lb_port
value: "8443"
image: ghcr.io/kube-vip/kube-vip:v0.8.9
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
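The static pod spec above is what provides the 192.168.39.254 control-plane VIP: kube-vip runs on every control-plane node, competes for a Lease named plndr-cp-lock, and the current leader answers for the address on eth0 while load-balancing port 8443 across API servers. Once the cluster is back up, the election and the VIP can be observed directly; a hedged sketch using the names from the config above:

    kubectl -n kube-system get lease plndr-cp-lock -o yaml   # holderIdentity shows the current kube-vip leader
    ip addr show eth0 | grep 192.168.39.254                  # on the leader: the VIP bound to eth0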
I0217 11:57:16.179161 100380 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.1
I0217 11:57:16.189237 100380 binaries.go:44] Found k8s binaries, skipping transfer
I0217 11:57:16.189321 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0217 11:57:16.198727 100380 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (310 bytes)
I0217 11:57:16.214787 100380 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0217 11:57:16.231014 100380 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2293 bytes)
I0217 11:57:16.246729 100380 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1441 bytes)
I0217 11:57:16.261779 100380 ssh_runner.go:195] Run: grep 192.168.39.254 control-plane.minikube.internal$ /etc/hosts
I0217 11:57:16.265453 100380 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.39.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0217 11:57:16.276521 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:16.384249 100380 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0217 11:57:16.401291 100380 certs.go:68] Setting up /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738 for IP: 192.168.39.249
I0217 11:57:16.401328 100380 certs.go:194] generating shared ca certs ...
I0217 11:57:16.401350 100380 certs.go:226] acquiring lock for ca certs: {Name:mk7093571229e43ae88bf2507ccc9fd2cd05388e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:16.401508 100380 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.key
I0217 11:57:16.401544 100380 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.key
I0217 11:57:16.401555 100380 certs.go:256] generating profile certs ...
I0217 11:57:16.401635 100380 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/client.key
I0217 11:57:16.401660 100380 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b
I0217 11:57:16.401671 100380 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.39.249 192.168.39.31 192.168.39.254]
I0217 11:57:16.475033 100380 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b ...
I0217 11:57:16.475062 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b: {Name:mkcae1f9f128e66451afcd5b133e6826e9862cbe Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:16.475228 100380 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b ...
I0217 11:57:16.475243 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b: {Name:mk484c481609a3c2ed473dfecb8f5468118b1367 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:16.475330 100380 certs.go:381] copying /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt.1b1cbf3b -> /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt
I0217 11:57:16.475492 100380 certs.go:385] copying /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key.1b1cbf3b -> /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key
I0217 11:57:16.475629 100380 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.key
I0217 11:57:16.475644 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0217 11:57:16.475656 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0217 11:57:16.475671 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0217 11:57:16.475699 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0217 11:57:16.475714 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0217 11:57:16.475726 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0217 11:57:16.475737 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0217 11:57:16.475748 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0217 11:57:16.475800 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502.pem (1338 bytes)
W0217 11:57:16.475831 100380 certs.go:480] ignoring /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502_empty.pem, impossibly tiny 0 bytes
I0217 11:57:16.475839 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem (1679 bytes)
I0217 11:57:16.475861 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem (1082 bytes)
I0217 11:57:16.475900 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem (1123 bytes)
I0217 11:57:16.475927 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem (1675 bytes)
I0217 11:57:16.476002 100380 certs.go:484] found cert: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem (1708 bytes)
I0217 11:57:16.476031 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502.pem -> /usr/share/ca-certificates/84502.pem
I0217 11:57:16.476046 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> /usr/share/ca-certificates/845022.pem
I0217 11:57:16.476058 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:16.476652 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0217 11:57:16.507138 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0217 11:57:16.534527 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0217 11:57:16.562922 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0217 11:57:16.587311 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I0217 11:57:16.624087 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0217 11:57:16.662037 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0217 11:57:16.713619 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0217 11:57:16.756345 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/certs/84502.pem --> /usr/share/ca-certificates/84502.pem (1338 bytes)
I0217 11:57:16.803520 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem --> /usr/share/ca-certificates/845022.pem (1708 bytes)
I0217 11:57:16.846879 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0217 11:57:16.920267 100380 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0217 11:57:16.950648 100380 ssh_runner.go:195] Run: openssl version
I0217 11:57:16.958784 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/84502.pem && ln -fs /usr/share/ca-certificates/84502.pem /etc/ssl/certs/84502.pem"
I0217 11:57:16.987238 100380 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/84502.pem
I0217 11:57:16.994220 100380 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Feb 17 11:42 /usr/share/ca-certificates/84502.pem
I0217 11:57:16.994283 100380 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/84502.pem
I0217 11:57:17.016466 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/84502.pem /etc/ssl/certs/51391683.0"
I0217 11:57:17.039972 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/845022.pem && ln -fs /usr/share/ca-certificates/845022.pem /etc/ssl/certs/845022.pem"
I0217 11:57:17.061818 100380 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/845022.pem
I0217 11:57:17.068988 100380 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Feb 17 11:42 /usr/share/ca-certificates/845022.pem
I0217 11:57:17.069057 100380 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/845022.pem
I0217 11:57:17.075953 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/845022.pem /etc/ssl/certs/3ec20f2e.0"
I0217 11:57:17.094161 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0217 11:57:17.111313 100380 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:17.116268 100380 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Feb 17 11:35 /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:17.116335 100380 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0217 11:57:17.122743 100380 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0217 11:57:17.141827 100380 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0217 11:57:17.146771 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I0217 11:57:17.158301 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I0217 11:57:17.170200 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I0217 11:57:17.177413 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I0217 11:57:17.186556 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I0217 11:57:17.193933 100380 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
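The six openssl invocations above are 24-hour expiry probes: -checkend 86400 exits non-zero if the certificate expires within the next 86400 seconds, which is what would force certificate regeneration on restart. A minimal Go sketch of the same check using crypto/x509 instead of shelling out; the path below is just one of the files probed above.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// certValidFor reports whether the PEM-encoded certificate at path is still
// valid for at least d, the same condition "openssl x509 -checkend <seconds>"
// tests in the log above.
func certValidFor(path string, d time.Duration) (bool, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return false, fmt.Errorf("no PEM data in %s", path)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return cert.NotAfter.After(time.Now().Add(d)), nil
}

func main() {
	// One of the files probed in the log; any PEM certificate works here.
	ok, err := certValidFor("/var/lib/minikube/certs/apiserver-kubelet-client.crt", 24*time.Hour)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("valid for at least 24h:", ok)
}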
I0217 11:57:17.203839 100380 kubeadm.go:392] StartCluster: {Name:ha-783738 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube/iso/minikube-v1.35.0-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.46-1739182054-20387@sha256:3788b0691001f3da958b3956b3e6c1d1db8535d5286bd2e096e6e75dc609dbad Memory:2200 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.1 ClusterName:ha-783738 Namespace:default APIServerHAVIP:192.168.39.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.39.31 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.39.168 Port:0 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0217 11:57:17.204089 100380 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0217 11:57:17.225257 100380 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0217 11:57:17.236858 100380 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I0217 11:57:17.236876 100380 kubeadm.go:593] restartPrimaryControlPlane start ...
I0217 11:57:17.236920 100380 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0217 11:57:17.246285 100380 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0217 11:57:17.246828 100380 kubeconfig.go:47] verify endpoint returned: get endpoint: "ha-783738" does not appear in /home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:57:17.246986 100380 kubeconfig.go:62] /home/jenkins/minikube-integration/20427-77349/kubeconfig needs updating (will repair): [kubeconfig missing "ha-783738" cluster setting kubeconfig missing "ha-783738" context setting]
I0217 11:57:17.247367 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/kubeconfig: {Name:mka23a5c17f10bb58374e83755a2ac6a44464e11 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:17.247895 100380 loader.go:402] Config loaded from file: /home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:57:17.248117 100380 kapi.go:59] client config for ha-783738: &rest.Config{Host:"https://192.168.39.249:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/client.crt", KeyFile:"/home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/client.key", CAFile:"/home/jenkins/minikube-integration/20427-77349/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x24df700), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0217 11:57:17.248591 100380 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0217 11:57:17.248610 100380 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0217 11:57:17.248615 100380 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0217 11:57:17.248619 100380 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0217 11:57:17.248634 100380 cert_rotation.go:140] Starting client certificate rotation controller
I0217 11:57:17.249054 100380 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0217 11:57:17.258029 100380 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.39.249
I0217 11:57:17.258053 100380 kubeadm.go:597] duration metric: took 21.170416ms to restartPrimaryControlPlane
I0217 11:57:17.258062 100380 kubeadm.go:394] duration metric: took 54.240079ms to StartCluster
I0217 11:57:17.258077 100380 settings.go:142] acquiring lock: {Name:mkf730c657b1c2d5a481dbeb02dabe7dfa17f2d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:17.258150 100380 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20427-77349/kubeconfig
I0217 11:57:17.258639 100380 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20427-77349/kubeconfig: {Name:mka23a5c17f10bb58374e83755a2ac6a44464e11 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0217 11:57:17.258848 100380 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.39.249 Port:8443 KubernetesVersion:v1.32.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0217 11:57:17.258870 100380 start.go:241] waiting for startup goroutines ...
I0217 11:57:17.258884 100380 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0217 11:57:17.259112 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:17.261397 100380 out.go:177] * Enabled addons:
I0217 11:57:17.262668 100380 addons.go:514] duration metric: took 3.785415ms for enable addons: enabled=[]
I0217 11:57:17.262703 100380 start.go:246] waiting for cluster config update ...
I0217 11:57:17.262713 100380 start.go:255] writing updated cluster config ...
I0217 11:57:17.264127 100380 out.go:201]
I0217 11:57:17.265577 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:17.265703 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:17.267570 100380 out.go:177] * Starting "ha-783738-m02" control-plane node in "ha-783738" cluster
I0217 11:57:17.268921 100380 preload.go:131] Checking if preload exists for k8s version v1.32.1 and runtime docker
I0217 11:57:17.268950 100380 cache.go:56] Caching tarball of preloaded images
I0217 11:57:17.269061 100380 preload.go:172] Found /home/jenkins/minikube-integration/20427-77349/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0217 11:57:17.269074 100380 cache.go:59] Finished verifying existence of preloaded tar for v1.32.1 on docker
I0217 11:57:17.269250 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:17.269484 100380 start.go:360] acquireMachinesLock for ha-783738-m02: {Name:mk05ba8323ae77ab7dcc14c378d65810d956fdc0 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I0217 11:57:17.269554 100380 start.go:364] duration metric: took 46.103µs to acquireMachinesLock for "ha-783738-m02"
I0217 11:57:17.269576 100380 start.go:96] Skipping create...Using existing machine configuration
I0217 11:57:17.269584 100380 fix.go:54] fixHost starting: m02
I0217 11:57:17.269846 100380 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0217 11:57:17.269891 100380 main.go:141] libmachine: Launching plugin server for driver kvm2
I0217 11:57:17.284961 100380 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:45093
I0217 11:57:17.285438 100380 main.go:141] libmachine: () Calling .GetVersion
I0217 11:57:17.285964 100380 main.go:141] libmachine: Using API Version 1
I0217 11:57:17.285991 100380 main.go:141] libmachine: () Calling .SetConfigRaw
I0217 11:57:17.286358 100380 main.go:141] libmachine: () Calling .GetMachineName
I0217 11:57:17.286562 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:17.286744 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetState
I0217 11:57:17.288288 100380 fix.go:112] recreateIfNeeded on ha-783738-m02: state=Stopped err=<nil>
I0217 11:57:17.288317 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
W0217 11:57:17.288473 100380 fix.go:138] unexpected machine state, will restart: <nil>
I0217 11:57:17.290496 100380 out.go:177] * Restarting existing kvm2 VM for "ha-783738-m02" ...
I0217 11:57:17.291737 100380 main.go:141] libmachine: (ha-783738-m02) Calling .Start
I0217 11:57:17.291936 100380 main.go:141] libmachine: (ha-783738-m02) starting domain...
I0217 11:57:17.291957 100380 main.go:141] libmachine: (ha-783738-m02) ensuring networks are active...
I0217 11:57:17.292625 100380 main.go:141] libmachine: (ha-783738-m02) Ensuring network default is active
I0217 11:57:17.292935 100380 main.go:141] libmachine: (ha-783738-m02) Ensuring network mk-ha-783738 is active
I0217 11:57:17.293260 100380 main.go:141] libmachine: (ha-783738-m02) getting domain XML...
I0217 11:57:17.293893 100380 main.go:141] libmachine: (ha-783738-m02) creating domain...
I0217 11:57:18.506378 100380 main.go:141] libmachine: (ha-783738-m02) waiting for IP...
I0217 11:57:18.507364 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:18.507881 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:18.507974 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:18.507878 100573 retry.go:31] will retry after 190.071186ms: waiting for domain to come up
I0217 11:57:18.699203 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:18.699617 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:18.699682 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:18.699590 100573 retry.go:31] will retry after 254.022024ms: waiting for domain to come up
I0217 11:57:18.955132 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:18.955578 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:18.955602 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:18.955533 100573 retry.go:31] will retry after 332.594264ms: waiting for domain to come up
I0217 11:57:19.290041 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:19.290494 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:19.290519 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:19.290472 100573 retry.go:31] will retry after 550.484931ms: waiting for domain to come up
I0217 11:57:19.842363 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:19.842844 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:19.842873 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:19.842822 100573 retry.go:31] will retry after 743.60757ms: waiting for domain to come up
I0217 11:57:20.587667 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:20.588025 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:20.588058 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:20.587981 100573 retry.go:31] will retry after 701.750144ms: waiting for domain to come up
I0217 11:57:21.290980 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:21.291500 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:21.291530 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:21.291445 100573 retry.go:31] will retry after 755.313925ms: waiting for domain to come up
I0217 11:57:22.047876 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:22.048286 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:22.048318 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:22.048246 100573 retry.go:31] will retry after 1.338224716s: waiting for domain to come up
I0217 11:57:23.388238 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:23.388759 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:23.388796 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:23.388727 100573 retry.go:31] will retry after 1.367661407s: waiting for domain to come up
I0217 11:57:24.758376 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:24.758722 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:24.758764 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:24.758718 100573 retry.go:31] will retry after 2.08548116s: waiting for domain to come up
I0217 11:57:26.846621 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:26.847150 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:26.847253 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:26.847166 100573 retry.go:31] will retry after 1.933968455s: waiting for domain to come up
I0217 11:57:28.782369 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:28.782785 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:28.782815 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:28.782752 100573 retry.go:31] will retry after 3.162167749s: waiting for domain to come up
I0217 11:57:31.947188 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:31.947578 100380 main.go:141] libmachine: (ha-783738-m02) DBG | unable to find current IP address of domain ha-783738-m02 in network mk-ha-783738
I0217 11:57:31.947603 100380 main.go:141] libmachine: (ha-783738-m02) DBG | I0217 11:57:31.947545 100573 retry.go:31] will retry after 3.924986004s: waiting for domain to come up
I0217 11:57:35.877102 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.877437 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has current primary IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.877460 100380 main.go:141] libmachine: (ha-783738-m02) found domain IP: 192.168.39.31
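The block above is the driver polling libvirt for the restarted domain's DHCP lease, retrying with growing delays (from roughly 190ms up to several seconds) until an address appears. A generic sketch of that wait-with-backoff pattern follows; the function name, delay values, and the fake probe are illustrative, not minikube's actual retry API.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// waitFor retries probe with a growing, jittered delay until it succeeds or
// the deadline passes, mirroring the "will retry after ..." lines above.
func waitFor(probe func() (string, error), timeout time.Duration) (string, error) {
	deadline := time.Now().Add(timeout)
	delay := 200 * time.Millisecond
	for time.Now().Before(deadline) {
		if ip, err := probe(); err == nil {
			return ip, nil
		}
		// Sleep the base delay plus up to 50% jitter, then grow the base,
		// capping it so polling continues at a reasonable rate.
		time.Sleep(delay + time.Duration(rand.Int63n(int64(delay/2))))
		if delay *= 2; delay > 4*time.Second {
			delay = 4 * time.Second
		}
	}
	return "", errors.New("timed out waiting for IP")
}

func main() {
	// Stand-in probe: "finds" the lease on the fifth attempt.
	attempts := 0
	ip, err := waitFor(func() (string, error) {
		if attempts++; attempts < 5 {
			return "", errors.New("no DHCP lease yet")
		}
		return "192.168.39.31", nil
	}, time.Minute)
	fmt.Println(ip, err)
}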
I0217 11:57:35.877473 100380 main.go:141] libmachine: (ha-783738-m02) reserving static IP address...
I0217 11:57:35.877915 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "ha-783738-m02", mac: "52:54:00:06:81:a2", ip: "192.168.39.31"} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:35.877942 100380 main.go:141] libmachine: (ha-783738-m02) DBG | skip adding static IP to network mk-ha-783738 - found existing host DHCP lease matching {name: "ha-783738-m02", mac: "52:54:00:06:81:a2", ip: "192.168.39.31"}
I0217 11:57:35.877960 100380 main.go:141] libmachine: (ha-783738-m02) reserved static IP address 192.168.39.31 for domain ha-783738-m02
I0217 11:57:35.877972 100380 main.go:141] libmachine: (ha-783738-m02) waiting for SSH...
I0217 11:57:35.877983 100380 main.go:141] libmachine: (ha-783738-m02) DBG | Getting to WaitForSSH function...
I0217 11:57:35.880382 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.880801 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:35.880830 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:35.880903 100380 main.go:141] libmachine: (ha-783738-m02) DBG | Using SSH client type: external
I0217 11:57:35.880925 100380 main.go:141] libmachine: (ha-783738-m02) DBG | Using SSH private key: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa (-rw-------)
I0217 11:57:35.880955 100380 main.go:141] libmachine: (ha-783738-m02) DBG | &{[-F /dev/null -o ConnectionAttempts=3 -o ConnectTimeout=10 -o ControlMaster=no -o ControlPath=none -o LogLevel=quiet -o PasswordAuthentication=no -o ServerAliveInterval=60 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null docker@192.168.39.31 -o IdentitiesOnly=yes -i /home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa -p 22] /usr/bin/ssh <nil>}
I0217 11:57:35.880970 100380 main.go:141] libmachine: (ha-783738-m02) DBG | About to run SSH command:
I0217 11:57:35.880982 100380 main.go:141] libmachine: (ha-783738-m02) DBG | exit 0
I0217 11:57:36.005182 100380 main.go:141] libmachine: (ha-783738-m02) DBG | SSH cmd err, output: <nil>:
I0217 11:57:36.005527 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetConfigRaw
I0217 11:57:36.006216 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetIP
I0217 11:57:36.008704 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.009084 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.009118 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.009443 100380 profile.go:143] Saving config to /home/jenkins/minikube-integration/20427-77349/.minikube/profiles/ha-783738/config.json ...
I0217 11:57:36.009639 100380 machine.go:93] provisionDockerMachine start ...
I0217 11:57:36.009657 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:36.009816 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.011849 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.012187 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.012218 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.012360 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.012557 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.012710 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.012836 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.012947 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.013115 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.013130 100380 main.go:141] libmachine: About to run SSH command:
hostname
I0217 11:57:36.113056 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: minikube
I0217 11:57:36.113093 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetMachineName
I0217 11:57:36.113376 100380 buildroot.go:166] provisioning hostname "ha-783738-m02"
I0217 11:57:36.113403 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetMachineName
I0217 11:57:36.113566 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.116233 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.116606 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.116634 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.116762 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.116907 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.117025 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.117242 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.117464 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.117681 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.117699 100380 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-783738-m02 && echo "ha-783738-m02" | sudo tee /etc/hostname
I0217 11:57:36.230628 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-783738-m02
I0217 11:57:36.230670 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.233644 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.233991 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.234015 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.234196 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.234491 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.234686 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.234856 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.235006 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.235194 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.235211 100380 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-783738-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-783738-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-783738-m02' | sudo tee -a /etc/hosts;
fi
fi
I0217 11:57:36.341290 100380 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0217 11:57:36.341332 100380 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/20427-77349/.minikube CaCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20427-77349/.minikube}
I0217 11:57:36.341348 100380 buildroot.go:174] setting up certificates
I0217 11:57:36.341360 100380 provision.go:84] configureAuth start
I0217 11:57:36.341373 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetMachineName
I0217 11:57:36.341646 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetIP
I0217 11:57:36.344453 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.344944 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.344981 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.345158 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.347416 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.347719 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.347744 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.347910 100380 provision.go:143] copyHostCerts
I0217 11:57:36.347943 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:36.347989 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem, removing ...
I0217 11:57:36.347999 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem
I0217 11:57:36.348065 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/ca.pem (1082 bytes)
I0217 11:57:36.348156 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:36.348190 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem, removing ...
I0217 11:57:36.348200 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem
I0217 11:57:36.348229 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/cert.pem (1123 bytes)
I0217 11:57:36.348286 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:36.348310 100380 exec_runner.go:144] found /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem, removing ...
I0217 11:57:36.348320 100380 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem
I0217 11:57:36.348347 100380 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20427-77349/.minikube/key.pem (1675 bytes)
I0217 11:57:36.348413 100380 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca-key.pem org=jenkins.ha-783738-m02 san=[127.0.0.1 192.168.39.31 ha-783738-m02 localhost minikube]
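provision.go:117 above regenerates the machine's Docker TLS server certificate, signed by the shared CA and carrying SANs for the node IP and hostnames listed in san=[...]. Below is a self-contained sketch of producing a CA-signed server certificate with the same SANs; it creates a throwaway in-memory CA purely so the example runs, whereas the real flow signs with the ca.pem/ca-key.pem files from the .minikube directory.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"net"
	"time"
)

func main() {
	// Ephemeral CA, standing in for ca.pem/ca-key.pem.
	caKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example CA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, &caKey.PublicKey, caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// Server certificate with the SANs from the log line above.
	srvKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	srvTmpl := &x509.Certificate{
		SerialNumber: big.NewInt(2),
		Subject:      pkix.Name{Organization: []string{"jenkins.ha-783738-m02"}},
		DNSNames:     []string{"ha-783738-m02", "localhost", "minikube"},
		IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.39.31")},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}
	srvDER, err := x509.CreateCertificate(rand.Reader, srvTmpl, caCert, &srvKey.PublicKey, caKey)
	if err != nil {
		panic(err)
	}
	fmt.Println("server cert DER bytes:", len(srvDER))
}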
I0217 11:57:36.476199 100380 provision.go:177] copyRemoteCerts
I0217 11:57:36.476256 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0217 11:57:36.476280 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.479126 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.479497 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.479529 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.479677 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.479868 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.480073 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.480258 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:36.558954 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0217 11:57:36.559023 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0217 11:57:36.581755 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0217 11:57:36.581816 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0217 11:57:36.604328 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem -> /etc/docker/server.pem
I0217 11:57:36.604411 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0217 11:57:36.626183 100380 provision.go:87] duration metric: took 284.807453ms to configureAuth
I0217 11:57:36.626219 100380 buildroot.go:189] setting minikube options for container-runtime
I0217 11:57:36.626492 100380 config.go:182] Loaded profile config "ha-783738": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.32.1
I0217 11:57:36.626522 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:36.626768 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.629194 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.629569 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.629594 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.629740 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.629904 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.630077 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.630201 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.630389 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.630601 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.630614 100380 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0217 11:57:36.730964 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: tmpfs
I0217 11:57:36.730995 100380 buildroot.go:70] root file system type: tmpfs
I0217 11:57:36.731148 100380 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0217 11:57:36.731184 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.733718 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.734119 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.734150 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.734340 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.734539 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.734714 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.734847 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.734986 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.735198 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.735304 100380 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment="NO_PROXY=192.168.39.249"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0217 11:57:36.846599 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment=NO_PROXY=192.168.39.249
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0217 11:57:36.846633 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:36.849370 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.849714 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:36.849733 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:36.849923 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:36.850116 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.850290 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:36.850443 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:36.850608 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:36.850788 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:36.850805 100380 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0217 11:57:38.700010 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: diff: can't stat '/lib/systemd/system/docker.service': No such file or directory
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
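The docker.service content is written to docker.service.new and only moved into place (followed by daemon-reload, enable, and restart) when diff reports a difference, so an unchanged configuration never restarts the runtime; here diff failed because no unit existed yet, which is why the symlink was created. A small Go sketch of that write-only-if-changed idiom; writeIfChanged is an illustrative helper, not a minikube function.

package main

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
)

// writeIfChanged writes content to path only when it differs from what is
// already there, and reports whether anything was written. This mirrors the
// "diff -u ... || { mv ...; systemctl restart ...; }" pattern in the log: an
// identical unit file triggers no restart.
func writeIfChanged(path string, content []byte, mode os.FileMode) (bool, error) {
	old, err := os.ReadFile(path)
	if err == nil && bytes.Equal(old, content) {
		return false, nil // identical, nothing to do
	}
	tmp := path + ".new"
	if err := os.WriteFile(tmp, content, mode); err != nil {
		return false, err
	}
	// Rename is atomic on the same filesystem, so readers never observe a
	// half-written unit file.
	return true, os.Rename(tmp, path)
}

func main() {
	unit := []byte("[Service]\nExecStart=\nExecStart=/usr/bin/dockerd\n")
	path := filepath.Join(os.TempDir(), "docker.service")
	changed, err := writeIfChanged(path, unit, 0o644)
	fmt.Println("changed:", changed, "err:", err)
	// Only when changed is true would a caller run daemon-reload and
	// restart the service.
}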
I0217 11:57:38.700036 100380 machine.go:96] duration metric: took 2.690384734s to provisionDockerMachine
I0217 11:57:38.700051 100380 start.go:293] postStartSetup for "ha-783738-m02" (driver="kvm2")
I0217 11:57:38.700060 100380 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0217 11:57:38.700075 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:38.700389 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0217 11:57:38.700425 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:38.703068 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.703435 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:38.703465 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.703605 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:38.703807 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.703952 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:38.704102 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:38.783381 100380 ssh_runner.go:195] Run: cat /etc/os-release
I0217 11:57:38.787188 100380 info.go:137] Remote host: Buildroot 2023.02.9
I0217 11:57:38.787215 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/addons for local assets ...
I0217 11:57:38.787270 100380 filesync.go:126] Scanning /home/jenkins/minikube-integration/20427-77349/.minikube/files for local assets ...
I0217 11:57:38.787341 100380 filesync.go:149] local asset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> 845022.pem in /etc/ssl/certs
I0217 11:57:38.787352 100380 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem -> /etc/ssl/certs/845022.pem
I0217 11:57:38.787430 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0217 11:57:38.796091 100380 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20427-77349/.minikube/files/etc/ssl/certs/845022.pem --> /etc/ssl/certs/845022.pem (1708 bytes)
I0217 11:57:38.817716 100380 start.go:296] duration metric: took 117.649565ms for postStartSetup
I0217 11:57:38.817759 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:38.818052 100380 ssh_runner.go:195] Run: sudo ls --almost-all -1 /var/lib/minikube/backup
I0217 11:57:38.818087 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:38.820354 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.820669 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:38.820694 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.820809 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:38.820978 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.821138 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:38.821273 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:38.900214 100380 machine.go:197] restoring vm config from /var/lib/minikube/backup: [etc]
I0217 11:57:38.900294 100380 ssh_runner.go:195] Run: sudo rsync --archive --update /var/lib/minikube/backup/etc /
I0217 11:57:38.959273 100380 fix.go:56] duration metric: took 21.689681729s for fixHost
I0217 11:57:38.959327 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:38.961853 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.962326 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:38.962364 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:38.962591 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:38.962788 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.962952 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:38.963062 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:38.963238 100380 main.go:141] libmachine: Using SSH client type: native
I0217 11:57:38.963408 100380 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x865ca0] 0x868980 <nil> [] 0s} 192.168.39.31 22 <nil> <nil>}
I0217 11:57:38.963419 100380 main.go:141] libmachine: About to run SSH command:
date +%s.%N
I0217 11:57:39.071315 100380 main.go:141] libmachine: SSH cmd err, output: <nil>: 1739793459.049434891
I0217 11:57:39.071339 100380 fix.go:216] guest clock: 1739793459.049434891
I0217 11:57:39.071349 100380 fix.go:229] Guest: 2025-02-17 11:57:39.049434891 +0000 UTC Remote: 2025-02-17 11:57:38.959302801 +0000 UTC m=+48.782039917 (delta=90.13209ms)
I0217 11:57:39.071366 100380 fix.go:200] guest clock delta is within tolerance: 90.13209ms
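fix.go above compares the guest's date +%s.%N output against the host clock and accepts the machine because the roughly 90ms skew is inside tolerance. A sketch of that comparison; the parsing helper and the 2s threshold are illustrative assumptions, while the two timestamps are taken from the log lines above so the example reproduces the same delta.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// clockSkew parses "seconds.nanoseconds" output from `date +%s.%N` and
// returns the absolute difference from the reference time.
func clockSkew(guestOut string, ref time.Time) (time.Duration, error) {
	parts := strings.SplitN(strings.TrimSpace(guestOut), ".", 2)
	secs, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, err
	}
	var nanos int64
	if len(parts) == 2 {
		// Right-pad the fractional part to nine digits (nanoseconds).
		frac := (parts[1] + "000000000")[:9]
		if nanos, err = strconv.ParseInt(frac, 10, 64); err != nil {
			return 0, err
		}
	}
	d := ref.Sub(time.Unix(secs, nanos))
	if d < 0 {
		d = -d
	}
	return d, nil
}

func main() {
	// Guest and host timestamps from the log; prints the same 90.13209ms
	// delta and compares it against an example 2s tolerance.
	host := time.Unix(1739793458, 959302801)
	skew, err := clockSkew("1739793459.049434891", host)
	if err != nil {
		panic(err)
	}
	fmt.Println("skew:", skew, "within tolerance:", skew < 2*time.Second)
}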
I0217 11:57:39.071371 100380 start.go:83] releasing machines lock for "ha-783738-m02", held for 21.801804436s
I0217 11:57:39.071393 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.071600 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetIP
I0217 11:57:39.074321 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.074707 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:39.074736 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.076949 100380 out.go:177] * Found network options:
I0217 11:57:39.078428 100380 out.go:177] - NO_PROXY=192.168.39.249
W0217 11:57:39.079686 100380 proxy.go:119] fail to check proxy env: Error ip not in block
I0217 11:57:39.079714 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.080218 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.080403 100380 main.go:141] libmachine: (ha-783738-m02) Calling .DriverName
I0217 11:57:39.080510 100380 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0217 11:57:39.080551 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
W0217 11:57:39.080631 100380 proxy.go:119] fail to check proxy env: Error ip not in block
I0217 11:57:39.080722 100380 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0217 11:57:39.080748 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHHostname
I0217 11:57:39.083432 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.083453 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.083887 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:39.083914 100380 main.go:141] libmachine: (ha-783738-m02) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:06:81:a2", ip: ""} in network mk-ha-783738: {Iface:virbr1 ExpiryTime:2025-02-17 12:53:18 +0000 UTC Type:0 Mac:52:54:00:06:81:a2 Iaid: IPaddr:192.168.39.31 Prefix:24 Hostname:ha-783738-m02 Clientid:01:52:54:00:06:81:a2}
I0217 11:57:39.083933 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.083949 100380 main.go:141] libmachine: (ha-783738-m02) DBG | domain ha-783738-m02 has defined IP address 192.168.39.31 and MAC address 52:54:00:06:81:a2 in network mk-ha-783738
I0217 11:57:39.084264 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:39.084411 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:39.084597 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:39.084609 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHPort
I0217 11:57:39.084763 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHKeyPath
I0217 11:57:39.084784 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
I0217 11:57:39.084915 100380 main.go:141] libmachine: (ha-783738-m02) Calling .GetSSHUsername
I0217 11:57:39.085034 100380 sshutil.go:53] new ssh client: &{IP:192.168.39.31 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/20427-77349/.minikube/machines/ha-783738-m02/id_rsa Username:docker}
W0217 11:57:39.178061 100380 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0217 11:57:39.178137 100380 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0217 11:57:39.195964 100380 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s)
I0217 11:57:39.196001 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:39.196148 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:39.216666 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0217 11:57:39.226815 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0217 11:57:39.236611 100380 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0217 11:57:39.236669 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0217 11:57:39.246500 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:39.256691 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0217 11:57:39.266509 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0217 11:57:39.276231 100380 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0217 11:57:39.286298 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0217 11:57:39.296149 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0217 11:57:39.305984 100380 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0217 11:57:39.315650 100380 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0217 11:57:39.324721 100380 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0217 11:57:39.324777 100380 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0217 11:57:39.334429 100380 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0217 11:57:39.343052 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:39.458041 100380 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0217 11:57:39.483361 100380 start.go:495] detecting cgroup driver to use...
I0217 11:57:39.483453 100380 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0217 11:57:39.501404 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:39.522545 100380 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0217 11:57:39.545214 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0217 11:57:39.557462 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:39.569445 100380 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0217 11:57:39.593668 100380 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0217 11:57:39.606767 100380 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0217 11:57:39.623713 100380 ssh_runner.go:195] Run: which cri-dockerd
I0217 11:57:39.627306 100380 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0217 11:57:39.635920 100380 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0217 11:57:39.651184 100380 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0217 11:57:39.767938 100380 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0217 11:57:39.884761 100380 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0217 11:57:39.884806 100380 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0217 11:57:39.900934 100380 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0217 11:57:40.013206 100380 ssh_runner.go:195] Run: sudo systemctl restart docker
I0217 11:58:41.088581 100380 ssh_runner.go:235] Completed: sudo systemctl restart docker: (1m1.075335279s)
I0217 11:58:41.088680 100380 ssh_runner.go:195] Run: sudo journalctl --no-pager -u docker
I0217 11:58:41.109373 100380 out.go:201]
W0217 11:58:41.110918 100380 out.go:270] X Exiting due to RUNTIME_ENABLE: Failed to enable container runtime: sudo systemctl restart docker: Process exited with status 1
stdout:
stderr:
Job for docker.service failed because the control process exited with error code.
See "systemctl status docker.service" and "journalctl -xeu docker.service" for details.
sudo journalctl --no-pager -u docker:
-- stdout --
Feb 17 11:57:37 ha-783738-m02 systemd[1]: Starting Docker Application Container Engine...
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.207555071Z" level=info msg="Starting up"
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.208523706Z" level=info msg="containerd not running, starting managed containerd"
Feb 17 11:57:37 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:37.209284365Z" level=info msg="started new containerd process" address=/var/run/docker/containerd/containerd.sock module=libcontainerd pid=499
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.234357473Z" level=info msg="starting containerd" revision=57f17b0a6295a39009d861b89e3b3b87b005ca27 version=v1.7.23
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.253922324Z" level=info msg="loading plugin \"io.containerd.event.v1.exchange\"..." type=io.containerd.event.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254071326Z" level=info msg="loading plugin \"io.containerd.internal.v1.opt\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254155313Z" level=info msg="loading plugin \"io.containerd.warning.v1.deprecations\"..." type=io.containerd.warning.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254195097Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.blockfile\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254502645Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.blockfile\"..." error="no scratch file generator: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254572700Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.btrfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254826671Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.btrfs\"..." error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs (ext4) must be a btrfs filesystem to be used with the btrfs snapshotter: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254880442Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.devmapper\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254926515Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.devmapper\"..." error="devmapper not configured: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.254965881Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.native\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.255209553Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.overlayfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.255502921Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.aufs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257578132Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.aufs\"..." error="aufs is not supported (modprobe aufs failed: exit status 1 \"modprobe: FATAL: Module aufs not found in directory /lib/modules/5.10.207\\n\"): skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257723954Z" level=info msg="loading plugin \"io.containerd.snapshotter.v1.zfs\"..." type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257912930Z" level=info msg="skip loading plugin \"io.containerd.snapshotter.v1.zfs\"..." error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter: skip plugin" type=io.containerd.snapshotter.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.257960933Z" level=info msg="loading plugin \"io.containerd.content.v1.content\"..." type=io.containerd.content.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.258214223Z" level=info msg="loading plugin \"io.containerd.metadata.v1.bolt\"..." type=io.containerd.metadata.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.258292090Z" level=info msg="metadata content store policy set" policy=shared
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262281766Z" level=info msg="loading plugin \"io.containerd.gc.v1.scheduler\"..." type=io.containerd.gc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262389757Z" level=info msg="loading plugin \"io.containerd.differ.v1.walking\"..." type=io.containerd.differ.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262437193Z" level=info msg="loading plugin \"io.containerd.lease.v1.manager\"..." type=io.containerd.lease.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262478052Z" level=info msg="loading plugin \"io.containerd.streaming.v1.manager\"..." type=io.containerd.streaming.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262523730Z" level=info msg="loading plugin \"io.containerd.runtime.v1.linux\"..." type=io.containerd.runtime.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262614966Z" level=info msg="loading plugin \"io.containerd.monitor.v1.cgroups\"..." type=io.containerd.monitor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.262915194Z" level=info msg="loading plugin \"io.containerd.runtime.v2.task\"..." type=io.containerd.runtime.v2
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263049035Z" level=info msg="loading plugin \"io.containerd.runtime.v2.shim\"..." type=io.containerd.runtime.v2
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263094390Z" level=info msg="loading plugin \"io.containerd.sandbox.store.v1.local\"..." type=io.containerd.sandbox.store.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263137669Z" level=info msg="loading plugin \"io.containerd.sandbox.controller.v1.local\"..." type=io.containerd.sandbox.controller.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263176270Z" level=info msg="loading plugin \"io.containerd.service.v1.containers-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263217488Z" level=info msg="loading plugin \"io.containerd.service.v1.content-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263254710Z" level=info msg="loading plugin \"io.containerd.service.v1.diff-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263292496Z" level=info msg="loading plugin \"io.containerd.service.v1.images-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263339613Z" level=info msg="loading plugin \"io.containerd.service.v1.introspection-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263377065Z" level=info msg="loading plugin \"io.containerd.service.v1.namespaces-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263418085Z" level=info msg="loading plugin \"io.containerd.service.v1.snapshots-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263453223Z" level=info msg="loading plugin \"io.containerd.service.v1.tasks-service\"..." type=io.containerd.service.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263511094Z" level=info msg="loading plugin \"io.containerd.grpc.v1.containers\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263549833Z" level=info msg="loading plugin \"io.containerd.grpc.v1.content\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263589341Z" level=info msg="loading plugin \"io.containerd.grpc.v1.diff\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263631649Z" level=info msg="loading plugin \"io.containerd.grpc.v1.events\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263726157Z" level=info msg="loading plugin \"io.containerd.grpc.v1.images\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263766086Z" level=info msg="loading plugin \"io.containerd.grpc.v1.introspection\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263809930Z" level=info msg="loading plugin \"io.containerd.grpc.v1.leases\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263847665Z" level=info msg="loading plugin \"io.containerd.grpc.v1.namespaces\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263885358Z" level=info msg="loading plugin \"io.containerd.grpc.v1.sandbox-controllers\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263932212Z" level=info msg="loading plugin \"io.containerd.grpc.v1.sandboxes\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.263972615Z" level=info msg="loading plugin \"io.containerd.grpc.v1.snapshots\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264020660Z" level=info msg="loading plugin \"io.containerd.grpc.v1.streaming\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264063975Z" level=info msg="loading plugin \"io.containerd.grpc.v1.tasks\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264103157Z" level=info msg="loading plugin \"io.containerd.transfer.v1.local\"..." type=io.containerd.transfer.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264158305Z" level=info msg="loading plugin \"io.containerd.grpc.v1.transfer\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264194401Z" level=info msg="loading plugin \"io.containerd.grpc.v1.version\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264230305Z" level=info msg="loading plugin \"io.containerd.internal.v1.restart\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264327104Z" level=info msg="loading plugin \"io.containerd.tracing.processor.v1.otlp\"..." type=io.containerd.tracing.processor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264417123Z" level=info msg="skip loading plugin \"io.containerd.tracing.processor.v1.otlp\"..." error="skip plugin: tracing endpoint not configured" type=io.containerd.tracing.processor.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264457690Z" level=info msg="loading plugin \"io.containerd.internal.v1.tracing\"..." type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264499822Z" level=info msg="skip loading plugin \"io.containerd.internal.v1.tracing\"..." error="skip plugin: tracing endpoint not configured" type=io.containerd.internal.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264534568Z" level=info msg="loading plugin \"io.containerd.grpc.v1.healthcheck\"..." type=io.containerd.grpc.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264575047Z" level=info msg="loading plugin \"io.containerd.nri.v1.nri\"..." type=io.containerd.nri.v1
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264616722Z" level=info msg="NRI interface is disabled by configuration."
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.264938960Z" level=info msg=serving... address=/var/run/docker/containerd/containerd-debug.sock
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265032087Z" level=info msg=serving... address=/var/run/docker/containerd/containerd.sock.ttrpc
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265091203Z" level=info msg=serving... address=/var/run/docker/containerd/containerd.sock
Feb 17 11:57:37 ha-783738-m02 dockerd[499]: time="2025-02-17T11:57:37.265132167Z" level=info msg="containerd successfully booted in 0.032037s"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.237803305Z" level=info msg="[graphdriver] trying configured driver: overlay2"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.295143778Z" level=info msg="Loading containers: start."
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.484051173Z" level=warning msg="ip6tables is enabled, but cannot set up ip6tables chains" error="failed to create NAT chain DOCKER: iptables failed: ip6tables --wait -t nat -N DOCKER: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)\nPerhaps ip6tables or your kernel needs to be upgraded.\n (exit status 3)"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.565431513Z" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.632528889Z" level=info msg="Loading containers: done."
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653906274Z" level=warning msg="WARNING: bridge-nf-call-iptables is disabled"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653941707Z" level=warning msg="WARNING: bridge-nf-call-ip6tables is disabled"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.653962858Z" level=info msg="Docker daemon" commit=92a8393 containerd-snapshotter=false storage-driver=overlay2 version=27.4.0
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.654196375Z" level=info msg="Daemon has completed initialization"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.676178691Z" level=info msg="API listen on /var/run/docker.sock"
Feb 17 11:57:38 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:38.676315120Z" level=info msg="API listen on [::]:2376"
Feb 17 11:57:38 ha-783738-m02 systemd[1]: Started Docker Application Container Engine.
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.005718953Z" level=info msg="Processing signal 'terminated'"
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007186879Z" level=info msg="stopping event stream following graceful shutdown" error="<nil>" module=libcontainerd namespace=moby
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007378782Z" level=info msg="Daemon shutdown complete"
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.007446197Z" level=info msg="stopping healthcheck following graceful shutdown" module=libcontainerd
Feb 17 11:57:40 ha-783738-m02 systemd[1]: Stopping Docker Application Container Engine...
Feb 17 11:57:40 ha-783738-m02 dockerd[493]: time="2025-02-17T11:57:40.008214930Z" level=info msg="stopping event stream following graceful shutdown" error="context canceled" module=libcontainerd namespace=plugins.moby
Feb 17 11:57:41 ha-783738-m02 systemd[1]: docker.service: Deactivated successfully.
Feb 17 11:57:41 ha-783738-m02 systemd[1]: Stopped Docker Application Container Engine.
Feb 17 11:57:41 ha-783738-m02 systemd[1]: Starting Docker Application Container Engine...
Feb 17 11:57:41 ha-783738-m02 dockerd[1120]: time="2025-02-17T11:57:41.051838490Z" level=info msg="Starting up"
Feb 17 11:58:41 ha-783738-m02 dockerd[1120]: failed to start daemon: failed to dial "/run/containerd/containerd.sock": failed to dial "/run/containerd/containerd.sock": context deadline exceeded
Feb 17 11:58:41 ha-783738-m02 systemd[1]: docker.service: Main process exited, code=exited, status=1/FAILURE
Feb 17 11:58:41 ha-783738-m02 systemd[1]: docker.service: Failed with result 'exit-code'.
Feb 17 11:58:41 ha-783738-m02 systemd[1]: Failed to start Docker Application Container Engine.
-- /stdout --
W0217 11:58:41.110964 100380 out.go:270] *
W0217 11:58:41.111815 100380 out.go:293] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0217 11:58:41.113412 100380 out.go:201]
==> Docker <==
Feb 17 11:57:23 ha-783738 dockerd[1134]: time="2025-02-17T11:57:23.574956613Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 17 11:57:44 ha-783738 dockerd[1126]: time="2025-02-17T11:57:44.652472286Z" level=info msg="ignoring event" container=0eab009d1fe54d541fe5b166302e5af1a153e8aa37ad6a133704c1f40918f7c9 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 17 11:57:44 ha-783738 dockerd[1134]: time="2025-02-17T11:57:44.653058320Z" level=info msg="shim disconnected" id=0eab009d1fe54d541fe5b166302e5af1a153e8aa37ad6a133704c1f40918f7c9 namespace=moby
Feb 17 11:57:44 ha-783738 dockerd[1134]: time="2025-02-17T11:57:44.653483834Z" level=warning msg="cleaning up after shim disconnected" id=0eab009d1fe54d541fe5b166302e5af1a153e8aa37ad6a133704c1f40918f7c9 namespace=moby
Feb 17 11:57:44 ha-783738 dockerd[1134]: time="2025-02-17T11:57:44.653545740Z" level=info msg="cleaning up dead shim" namespace=moby
Feb 17 11:57:45 ha-783738 dockerd[1126]: time="2025-02-17T11:57:45.663576348Z" level=info msg="ignoring event" container=1683ded4f12ef91eea7067f33248f5185b17f0532a1c1480efe277bcd8accfe6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 17 11:57:45 ha-783738 dockerd[1134]: time="2025-02-17T11:57:45.664110377Z" level=info msg="shim disconnected" id=1683ded4f12ef91eea7067f33248f5185b17f0532a1c1480efe277bcd8accfe6 namespace=moby
Feb 17 11:57:45 ha-783738 dockerd[1134]: time="2025-02-17T11:57:45.664165013Z" level=warning msg="cleaning up after shim disconnected" id=1683ded4f12ef91eea7067f33248f5185b17f0532a1c1480efe277bcd8accfe6 namespace=moby
Feb 17 11:57:45 ha-783738 dockerd[1134]: time="2025-02-17T11:57:45.664175956Z" level=info msg="cleaning up dead shim" namespace=moby
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.854960498Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.855123802Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.855151191Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.855373177Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.858152322Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.858222102Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.858232103Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 17 11:58:04 ha-783738 dockerd[1134]: time="2025-02-17T11:58:04.858372930Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Feb 17 11:58:25 ha-783738 dockerd[1126]: time="2025-02-17T11:58:25.325613613Z" level=info msg="ignoring event" container=0d8dd6abc6b0262f0e2de062685df6bbc87187dd14023d0fd12b894f48bd2001 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 17 11:58:25 ha-783738 dockerd[1134]: time="2025-02-17T11:58:25.326644755Z" level=info msg="shim disconnected" id=0d8dd6abc6b0262f0e2de062685df6bbc87187dd14023d0fd12b894f48bd2001 namespace=moby
Feb 17 11:58:25 ha-783738 dockerd[1134]: time="2025-02-17T11:58:25.326737271Z" level=warning msg="cleaning up after shim disconnected" id=0d8dd6abc6b0262f0e2de062685df6bbc87187dd14023d0fd12b894f48bd2001 namespace=moby
Feb 17 11:58:25 ha-783738 dockerd[1134]: time="2025-02-17T11:58:25.326756884Z" level=info msg="cleaning up dead shim" namespace=moby
Feb 17 11:58:26 ha-783738 dockerd[1126]: time="2025-02-17T11:58:26.334899301Z" level=info msg="ignoring event" container=2e90f752fdc0601abb5401e228fa8355b97462cfd9f4dafb766f56eaf8e7b13a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Feb 17 11:58:26 ha-783738 dockerd[1134]: time="2025-02-17T11:58:26.335703125Z" level=info msg="shim disconnected" id=2e90f752fdc0601abb5401e228fa8355b97462cfd9f4dafb766f56eaf8e7b13a namespace=moby
Feb 17 11:58:26 ha-783738 dockerd[1134]: time="2025-02-17T11:58:26.335778773Z" level=warning msg="cleaning up after shim disconnected" id=2e90f752fdc0601abb5401e228fa8355b97462cfd9f4dafb766f56eaf8e7b13a namespace=moby
Feb 17 11:58:26 ha-783738 dockerd[1134]: time="2025-02-17T11:58:26.335795547Z" level=info msg="cleaning up dead shim" namespace=moby
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
2e90f752fdc06 019ee182b58e2 37 seconds ago Exited kube-controller-manager 4 eeb1b6c34de35 kube-controller-manager-ha-783738
0d8dd6abc6b02 95c0bda56fc4d 37 seconds ago Exited kube-apiserver 4 a531c479908eb kube-apiserver-ha-783738
d524d25a3256e 2b0d6572d062c About a minute ago Running kube-scheduler 2 5633bc5aacc12 kube-scheduler-ha-783738
2b8921c7d9f71 22f88dde2caa4 About a minute ago Running kube-vip 1 5f0329677cb70 kube-vip-ha-783738
aeb757a6db075 a9e7e6b294baf About a minute ago Running etcd 2 8c5c6a3fd0ba0 etcd-ha-783738
8c236b02a8316 6e38f40d628db 4 minutes ago Exited storage-provisioner 3 3b5478be91580 storage-provisioner
f460be4118731 8c811b4aec35f 4 minutes ago Exited busybox 1 cd41205ee4990 busybox-58667487b6-mp8w2
5caaef1da4142 e29f9c7391fd9 4 minutes ago Exited kube-proxy 1 3bada7fe972b9 kube-proxy-pgwb4
95f567924c5ee c69fa2e9cbf5f 4 minutes ago Exited coredns 1 33c8d49183b1a coredns-668d6bf9bc-bhrvt
b4ccb469b39af df3849d954c98 4 minutes ago Exited kindnet-cni 1 bba5ce66a15dd kindnet-t72ln
b674f5b7afb38 c69fa2e9cbf5f 4 minutes ago Exited coredns 1 bfd8d387b7e96 coredns-668d6bf9bc-k5k72
1395373a3c212 2b0d6572d062c 5 minutes ago Exited kube-scheduler 1 fe3b7022472a7 kube-scheduler-ha-783738
0644596c7e815 a9e7e6b294baf 5 minutes ago Exited etcd 1 a79f0d4414c0a etcd-ha-783738
905fe651f5a2d 22f88dde2caa4 5 minutes ago Exited kube-vip 0 6e727a24edb43 kube-vip-ha-783738
==> coredns [95f567924c5e] <==
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 680cec097987c24242735352e9de77b2ba657caea131666c4002607b6f81fb6322fe6fa5c2d434be3fcd1251845cd6b7641e3a08a7d3b88486730de31a010646
CoreDNS-1.11.3
linux/amd64, go1.21.11, a6338e9
[INFO] 127.0.0.1:54083 - 5538 "HINFO IN 6952713337195609451.67698316276633629. udp 55 false 512" NXDOMAIN qr,rd,ra 55 0.046526479s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[586752551]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (17-Feb-2025 11:53:59.037) (total time: 30004ms):
Trace[586752551]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30004ms (11:54:29.042)
Trace[586752551]: [30.004932204s] [30.004932204s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[31748474]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (17-Feb-2025 11:53:59.037) (total time: 30005ms):
Trace[31748474]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30005ms (11:54:29.043)
Trace[31748474]: [30.005260877s] [30.005260877s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1254162758]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (17-Feb-2025 11:53:59.043) (total time: 30000ms):
Trace[1254162758]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:54:29.044)
Trace[1254162758]: [30.000938039s] [30.000938039s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [b674f5b7afb3] <==
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 680cec097987c24242735352e9de77b2ba657caea131666c4002607b6f81fb6322fe6fa5c2d434be3fcd1251845cd6b7641e3a08a7d3b88486730de31a010646
CoreDNS-1.11.3
linux/amd64, go1.21.11, a6338e9
[INFO] 127.0.0.1:47652 - 30454 "HINFO IN 3233588620932119307.6917908993167898246. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.026177844s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1310151553]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (17-Feb-2025 11:53:59.042) (total time: 30001ms):
Trace[1310151553]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (11:54:29.043)
Trace[1310151553]: [30.001216976s] [30.001216976s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1951418715]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (17-Feb-2025 11:53:59.039) (total time: 30005ms):
Trace[1951418715]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30005ms (11:54:29.044)
Trace[1951418715]: [30.005382964s] [30.005382964s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[606941673]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (17-Feb-2025 11:53:59.038) (total time: 30006ms):
Trace[606941673]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30006ms (11:54:29.044)
Trace[606941673]: [30.006431575s] [30.006431575s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> describe nodes <==
command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.32.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E0217 11:58:41.991338 2717 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp 127.0.0.1:8443: connect: connection refused"
E0217 11:58:41.993795 2717 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp 127.0.0.1:8443: connect: connection refused"
E0217 11:58:41.995188 2717 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp 127.0.0.1:8443: connect: connection refused"
E0217 11:58:41.996646 2717 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp 127.0.0.1:8443: connect: connection refused"
E0217 11:58:41.998266 2717 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp 127.0.0.1:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
==> dmesg <==
[Feb17 11:56] You have booted with nomodeset. This means your GPU drivers are DISABLED
[ +0.000000] Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly
[ +0.000001] Unless you actually understand what nomodeset does, you should reboot without enabling it
[ +0.052638] Spectre V2 : WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!
[ +0.037697] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.
[ +4.851026] platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
[ +1.992141] systemd-fstab-generator[116]: Ignoring "noauto" option for root device
[Feb17 11:57] NFSD: Using /var/lib/nfs/v4recovery as the NFSv4 state recovery directory
[ +0.000006] NFSD: unable to find recovery directory /var/lib/nfs/v4recovery
[ +0.000001] NFSD: Unable to initialize client recovery tracking! (-2)
[ +7.664405] systemd-fstab-generator[476]: Ignoring "noauto" option for root device
[ +0.058988] kauditd_printk_skb: 1 callbacks suppressed
[ +0.058916] systemd-fstab-generator[488]: Ignoring "noauto" option for root device
[ +2.348725] systemd-fstab-generator[1055]: Ignoring "noauto" option for root device
[ +0.313948] systemd-fstab-generator[1092]: Ignoring "noauto" option for root device
[ +0.110900] systemd-fstab-generator[1104]: Ignoring "noauto" option for root device
[ +0.140552] systemd-fstab-generator[1118]: Ignoring "noauto" option for root device
[ +2.263360] kauditd_printk_skb: 199 callbacks suppressed
[ +0.301992] systemd-fstab-generator[1377]: Ignoring "noauto" option for root device
[ +0.125509] systemd-fstab-generator[1390]: Ignoring "noauto" option for root device
[ +0.118202] systemd-fstab-generator[1402]: Ignoring "noauto" option for root device
[ +0.144218] systemd-fstab-generator[1417]: Ignoring "noauto" option for root device
[ +0.508597] systemd-fstab-generator[1584]: Ignoring "noauto" option for root device
[ +6.843964] kauditd_printk_skb: 180 callbacks suppressed
[ +8.294455] kauditd_printk_skb: 40 callbacks suppressed
==> etcd [0644596c7e81] <==
{"level":"warn","ts":"2025-02-17T11:56:37.953386Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"416.799075ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/deployments/\" range_end:\"/registry/deployments0\" count_only:true ","response":"","error":"context canceled"}
{"level":"info","ts":"2025-02-17T11:56:37.953402Z","caller":"traceutil/trace.go:171","msg":"trace[234534568] range","detail":"{range_begin:/registry/deployments/; range_end:/registry/deployments0; }","duration":"416.832899ms","start":"2025-02-17T11:56:37.536564Z","end":"2025-02-17T11:56:37.953396Z","steps":["trace[234534568] 'agreement among raft nodes before linearized reading' (duration: 416.815476ms)"],"step_count":1}
{"level":"warn","ts":"2025-02-17T11:56:37.953416Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-02-17T11:56:37.536510Z","time spent":"416.902435ms","remote":"127.0.0.1:58532","response type":"/etcdserverpb.KV/Range","request count":0,"request size":50,"response count":0,"response size":0,"request content":"key:\"/registry/deployments/\" range_end:\"/registry/deployments0\" count_only:true "}
2025/02/17 11:56:37 WARNING: [core] [Server #6] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
{"level":"warn","ts":"2025-02-17T11:56:37.953469Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.057072714s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"","error":"context canceled"}
{"level":"info","ts":"2025-02-17T11:56:37.953479Z","caller":"traceutil/trace.go:171","msg":"trace[2020420396] range","detail":"{range_begin:/registry/health; range_end:; }","duration":"1.057490424s","start":"2025-02-17T11:56:36.895986Z","end":"2025-02-17T11:56:37.953476Z","steps":["trace[2020420396] 'agreement among raft nodes before linearized reading' (duration: 1.057479846s)"],"step_count":1}
{"level":"warn","ts":"2025-02-17T11:56:37.953491Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-02-17T11:56:36.895975Z","time spent":"1.057513489s","remote":"127.0.0.1:58120","response type":"/etcdserverpb.KV/Range","request count":0,"request size":18,"response count":0,"response size":0,"request content":"key:\"/registry/health\" "}
2025/02/17 11:56:37 WARNING: [core] [Server #6] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
{"level":"warn","ts":"2025-02-17T11:56:37.953557Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.889027766s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/podtemplates/\" range_end:\"/registry/podtemplates0\" count_only:true ","response":"","error":"context canceled"}
{"level":"info","ts":"2025-02-17T11:56:37.953567Z","caller":"traceutil/trace.go:171","msg":"trace[159538693] range","detail":"{range_begin:/registry/podtemplates/; range_end:/registry/podtemplates0; }","duration":"1.889056203s","start":"2025-02-17T11:56:36.064508Z","end":"2025-02-17T11:56:37.953564Z","steps":["trace[159538693] 'agreement among raft nodes before linearized reading' (duration: 1.88904446s)"],"step_count":1}
{"level":"warn","ts":"2025-02-17T11:56:37.953580Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2025-02-17T11:56:36.064496Z","time spent":"1.889079683s","remote":"127.0.0.1:58254","response type":"/etcdserverpb.KV/Range","request count":0,"request size":52,"response count":0,"response size":0,"request content":"key:\"/registry/podtemplates/\" range_end:\"/registry/podtemplates0\" count_only:true "}
2025/02/17 11:56:37 WARNING: [core] [Server #6] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
{"level":"warn","ts":"2025-02-17T11:56:38.012328Z","caller":"embed/serve.go:212","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.39.249:2379: use of closed network connection"}
{"level":"warn","ts":"2025-02-17T11:56:38.012367Z","caller":"embed/serve.go:214","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.39.249:2379: use of closed network connection"}
{"level":"info","ts":"2025-02-17T11:56:38.012413Z","caller":"etcdserver/server.go:1534","msg":"skipped leadership transfer; local server is not leader","local-member-id":"318ee90c3446d547","current-leader-member-id":"0"}
{"level":"info","ts":"2025-02-17T11:56:38.012793Z","caller":"rafthttp/peer.go:330","msg":"stopping remote peer","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.012892Z","caller":"rafthttp/stream.go:294","msg":"stopped TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.012915Z","caller":"rafthttp/stream.go:294","msg":"stopped TCP streaming connection with remote peer","stream-writer-type":"stream Message","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.012991Z","caller":"rafthttp/pipeline.go:85","msg":"stopped HTTP pipelining with remote peer","local-member-id":"318ee90c3446d547","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.013022Z","caller":"rafthttp/stream.go:442","msg":"stopped stream reader with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"318ee90c3446d547","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.013134Z","caller":"rafthttp/stream.go:442","msg":"stopped stream reader with remote peer","stream-reader-type":"stream Message","local-member-id":"318ee90c3446d547","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.013145Z","caller":"rafthttp/peer.go:335","msg":"stopped remote peer","remote-peer-id":"645ac05e9f2d470a"}
{"level":"info","ts":"2025-02-17T11:56:38.016636Z","caller":"embed/etcd.go:582","msg":"stopping serving peer traffic","address":"192.168.39.249:2380"}
{"level":"info","ts":"2025-02-17T11:56:38.016720Z","caller":"embed/etcd.go:587","msg":"stopped serving peer traffic","address":"192.168.39.249:2380"}
{"level":"info","ts":"2025-02-17T11:56:38.016728Z","caller":"embed/etcd.go:380","msg":"closed etcd server","name":"ha-783738","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.39.249:2380"],"advertise-client-urls":["https://192.168.39.249:2379"]}
==> etcd [aeb757a6db07] <==
{"level":"info","ts":"2025-02-17T11:58:37.637100Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 received MsgPreVoteResp from 318ee90c3446d547 at term 3"}
{"level":"info","ts":"2025-02-17T11:58:37.637132Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 [logterm: 3, index: 3030] sent MsgPreVote request to 645ac05e9f2d470a at term 3"}
{"level":"warn","ts":"2025-02-17T11:58:37.832695Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:38.333313Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:38.833992Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:39.105914Z","caller":"rafthttp/probing_status.go:68","msg":"prober detected unhealthy status","round-tripper-name":"ROUND_TRIPPER_SNAPSHOT","remote-peer-id":"645ac05e9f2d470a","rtt":"0s","error":"dial tcp 192.168.39.31:2380: connect: connection refused"}
{"level":"warn","ts":"2025-02-17T11:58:39.106133Z","caller":"rafthttp/probing_status.go:68","msg":"prober detected unhealthy status","round-tripper-name":"ROUND_TRIPPER_RAFT_MESSAGE","remote-peer-id":"645ac05e9f2d470a","rtt":"0s","error":"dial tcp 192.168.39.31:2380: connect: connection refused"}
{"level":"info","ts":"2025-02-17T11:58:39.236323Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 is starting a new election at term 3"}
{"level":"info","ts":"2025-02-17T11:58:39.236529Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 became pre-candidate at term 3"}
{"level":"info","ts":"2025-02-17T11:58:39.236639Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 received MsgPreVoteResp from 318ee90c3446d547 at term 3"}
{"level":"info","ts":"2025-02-17T11:58:39.236682Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 [logterm: 3, index: 3030] sent MsgPreVote request to 645ac05e9f2d470a at term 3"}
{"level":"warn","ts":"2025-02-17T11:58:39.334913Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:39.836002Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:40.336905Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"info","ts":"2025-02-17T11:58:40.836559Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 is starting a new election at term 3"}
{"level":"info","ts":"2025-02-17T11:58:40.836692Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 became pre-candidate at term 3"}
{"level":"info","ts":"2025-02-17T11:58:40.836729Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 received MsgPreVoteResp from 318ee90c3446d547 at term 3"}
{"level":"info","ts":"2025-02-17T11:58:40.836762Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"318ee90c3446d547 [logterm: 3, index: 3030] sent MsgPreVote request to 645ac05e9f2d470a at term 3"}
{"level":"warn","ts":"2025-02-17T11:58:40.837045Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:41.084143Z","caller":"etcdserver/server.go:2161","msg":"failed to publish local member to cluster through raft","local-member-id":"318ee90c3446d547","local-member-attributes":"{Name:ha-783738 ClientURLs:[https://192.168.39.249:2379]}","request-path":"/0/members/318ee90c3446d547/attributes","publish-timeout":"7s","error":"etcdserver: request timed out"}
{"level":"warn","ts":"2025-02-17T11:58:41.337434Z","caller":"etcdserver/v3_server.go:920","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":15368416165570069265,"retry-timeout":"500ms"}
{"level":"warn","ts":"2025-02-17T11:58:41.827365Z","caller":"etcdserver/v3_server.go:932","msg":"timed out waiting for read index response (local node might have slow network)","timeout":"7s"}
{"level":"warn","ts":"2025-02-17T11:58:41.827445Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.000504247s","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"","error":"etcdserver: request timed out"}
{"level":"info","ts":"2025-02-17T11:58:41.827469Z","caller":"traceutil/trace.go:171","msg":"trace[1958910963] range","detail":"{range_begin:; range_end:; }","duration":"7.000551306s","start":"2025-02-17T11:58:34.826907Z","end":"2025-02-17T11:58:41.827459Z","steps":["trace[1958910963] 'agreement among raft nodes before linearized reading' (duration: 7.000502454s)"],"step_count":1}
{"level":"error","ts":"2025-02-17T11:58:41.827501Z","caller":"etcdhttp/health.go:367","msg":"Health check error","path":"/readyz","reason":"[+]serializable_read ok\n[-]linearizable_read failed: etcdserver: request timed out\n[+]data_corruption ok\n","status-code":503,"stacktrace":"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp.(*CheckRegistry).installRootHttpEndpoint.newHealthHandler.func2\n\tgo.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go:367\nnet/http.HandlerFunc.ServeHTTP\n\tnet/http/server.go:2171\nnet/http.(*ServeMux).ServeHTTP\n\tnet/http/server.go:2688\nnet/http.serverHandler.ServeHTTP\n\tnet/http/server.go:3142\nnet/http.(*conn).serve\n\tnet/http/server.go:2044"}
==> kernel <==
11:58:42 up 1 min, 0 users, load average: 0.47, 0.29, 0.11
Linux ha-783738 5.10.207 #1 SMP Tue Jan 14 08:15:54 UTC 2025 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2023.02.9"
==> kindnet [b4ccb469b39a] <==
I0217 11:56:00.000922 1 main.go:324] Node ha-783738-m02 has CIDR [10.244.1.0/24]
I0217 11:56:00.001386 1 main.go:297] Handling node with IPs: map[192.168.39.216:{}]
I0217 11:56:00.001417 1 main.go:324] Node ha-783738-m03 has CIDR [10.244.2.0/24]
I0217 11:56:00.002870 1 main.go:297] Handling node with IPs: map[192.168.39.168:{}]
I0217 11:56:00.003089 1 main.go:324] Node ha-783738-m04 has CIDR [10.244.3.0/24]
I0217 11:56:10.003758 1 main.go:297] Handling node with IPs: map[192.168.39.31:{}]
I0217 11:56:10.004120 1 main.go:324] Node ha-783738-m02 has CIDR [10.244.1.0/24]
I0217 11:56:10.004466 1 main.go:297] Handling node with IPs: map[192.168.39.216:{}]
I0217 11:56:10.004579 1 main.go:324] Node ha-783738-m03 has CIDR [10.244.2.0/24]
I0217 11:56:10.004848 1 main.go:297] Handling node with IPs: map[192.168.39.168:{}]
I0217 11:56:10.004993 1 main.go:324] Node ha-783738-m04 has CIDR [10.244.3.0/24]
I0217 11:56:10.005322 1 main.go:297] Handling node with IPs: map[192.168.39.249:{}]
I0217 11:56:10.005440 1 main.go:301] handling current node
I0217 11:56:20.008868 1 main.go:297] Handling node with IPs: map[192.168.39.249:{}]
I0217 11:56:20.008992 1 main.go:301] handling current node
I0217 11:56:20.009032 1 main.go:297] Handling node with IPs: map[192.168.39.31:{}]
I0217 11:56:20.009107 1 main.go:324] Node ha-783738-m02 has CIDR [10.244.1.0/24]
I0217 11:56:20.009351 1 main.go:297] Handling node with IPs: map[192.168.39.168:{}]
I0217 11:56:20.009426 1 main.go:324] Node ha-783738-m04 has CIDR [10.244.3.0/24]
I0217 11:56:30.000205 1 main.go:297] Handling node with IPs: map[192.168.39.168:{}]
I0217 11:56:30.000320 1 main.go:324] Node ha-783738-m04 has CIDR [10.244.3.0/24]
I0217 11:56:30.000673 1 main.go:297] Handling node with IPs: map[192.168.39.249:{}]
I0217 11:56:30.004120 1 main.go:301] handling current node
I0217 11:56:30.004403 1 main.go:297] Handling node with IPs: map[192.168.39.31:{}]
I0217 11:56:30.004484 1 main.go:324] Node ha-783738-m02 has CIDR [10.244.1.0/24]
==> kube-apiserver [0d8dd6abc6b0] <==
W0217 11:58:05.008746 1 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags
I0217 11:58:05.009254 1 options.go:238] external host was not specified, using 192.168.39.249
I0217 11:58:05.012100 1 server.go:143] Version: v1.32.1
I0217 11:58:05.012139 1 server.go:145] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0217 11:58:05.254592 1 shared_informer.go:313] Waiting for caches to sync for node_authorizer
I0217 11:58:05.265931 1 shared_informer.go:313] Waiting for caches to sync for *generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]
I0217 11:58:05.302917 1 plugins.go:157] Loaded 13 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,MutatingAdmissionPolicy,MutatingAdmissionWebhook.
I0217 11:58:05.302958 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota.
I0217 11:58:05.303380 1 instance.go:233] Using reconciler: lease
W0217 11:58:25.253372 1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: authentication handshake failed: context canceled"
W0217 11:58:25.253478 1 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: authentication handshake failed: context canceled"
F0217 11:58:25.304453 1 instance.go:226] Error creating leases: error creating storage factory: context deadline exceeded
==> kube-controller-manager [2e90f752fdc0] <==
I0217 11:58:05.575513 1 serving.go:386] Generated self-signed cert in-memory
I0217 11:58:05.850219 1 controllermanager.go:185] "Starting" version="v1.32.1"
I0217 11:58:05.850380 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0217 11:58:05.851835 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257
I0217 11:58:05.852508 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I0217 11:58:05.852713 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I0217 11:58:05.852833 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
E0217 11:58:26.312388 1 controllermanager.go:230] "Error building controller context" err="failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: Get \"https://192.168.39.249:8443/healthz\": dial tcp 192.168.39.249:8443: connect: connection refused"
==> kube-proxy [5caaef1da414] <==
add table ip kube-proxy
^^^^^^^^^^^^^^^^^^^^^^^^
>
E0217 11:53:59.616708 1 proxier.go:733] "Error cleaning up nftables rules" err=<
could not run nftables command: /dev/stdin:1:1-25: Error: Could not process rule: Operation not supported
add table ip6 kube-proxy
^^^^^^^^^^^^^^^^^^^^^^^^^
>
I0217 11:53:59.651486 1 server.go:698] "Successfully retrieved node IP(s)" IPs=["192.168.39.249"]
E0217 11:53:59.651650 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0217 11:53:59.696326 1 server_linux.go:147] "No iptables support for family" ipFamily="IPv6"
I0217 11:53:59.696377 1 server.go:245] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I0217 11:53:59.696401 1 server_linux.go:170] "Using iptables Proxier"
I0217 11:53:59.710221 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0217 11:53:59.711347 1 server.go:497] "Version info" version="v1.32.1"
I0217 11:53:59.711380 1 server.go:499] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0217 11:53:59.716398 1 config.go:199] "Starting service config controller"
I0217 11:53:59.717714 1 config.go:105] "Starting endpoint slice config controller"
I0217 11:53:59.717746 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0217 11:53:59.718142 1 shared_informer.go:313] Waiting for caches to sync for service config
I0217 11:53:59.718615 1 config.go:329] "Starting node config controller"
I0217 11:53:59.718758 1 shared_informer.go:313] Waiting for caches to sync for node config
I0217 11:53:59.817915 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0217 11:53:59.819456 1 shared_informer.go:320] Caches are synced for service config
I0217 11:53:59.821373 1 shared_informer.go:320] Caches are synced for node config
==> kube-scheduler [1395373a3c21] <==
E0217 11:53:52.919534 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: Get \"https://192.168.39.249:8443/api/v1/replicationcontrollers?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:53.771964 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: Get "https://192.168.39.249:8443/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:53:53.772105 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: Get \"https://192.168.39.249:8443/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:55.316775 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: Get "https://192.168.39.249:8443/api/v1/persistentvolumes?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:53:55.316841 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: Get \"https://192.168.39.249:8443/api/v1/persistentvolumes?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:55.317229 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: Get "https://192.168.39.249:8443/apis/policy/v1/poddisruptionbudgets?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:53:55.317287 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: Get \"https://192.168.39.249:8443/apis/policy/v1/poddisruptionbudgets?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:55.599247 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: Get "https://192.168.39.249:8443/api/v1/pods?fieldSelector=status.phase%21%3DSucceeded%2Cstatus.phase%21%3DFailed&limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:53:55.599332 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: Get \"https://192.168.39.249:8443/api/v1/pods?fieldSelector=status.phase%21%3DSucceeded%2Cstatus.phase%21%3DFailed&limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:55.855471 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://192.168.39.249:8443/api/v1/nodes?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:53:55.855524 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://192.168.39.249:8443/api/v1/nodes?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:56.059180 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: Get "https://192.168.39.249:8443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:53:56.059238 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get \"https://192.168.39.249:8443/api/v1/namespaces?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:53:59.073926 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0217 11:53:59.074031 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0217 11:53:59.074570 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W0217 11:53:59.075126 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0217 11:53:59.075450 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
E0217 11:53:59.074624 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
I0217 11:54:13.896773 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
E0217 11:56:05.957670 1 framework.go:1316] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-58667487b6-v7x5t\": pod busybox-58667487b6-v7x5t is already assigned to node \"ha-783738-m04\"" plugin="DefaultBinder" pod="default/busybox-58667487b6-v7x5t" node="ha-783738-m04"
E0217 11:56:05.971236 1 schedule_one.go:359] "scheduler cache ForgetPod failed" err="pod c5148a30-9b13-42ed-87c8-723413b074d3(default/busybox-58667487b6-v7x5t) wasn't assumed so cannot be forgotten" pod="default/busybox-58667487b6-v7x5t"
E0217 11:56:05.971303 1 schedule_one.go:1058] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-58667487b6-v7x5t\": pod busybox-58667487b6-v7x5t is already assigned to node \"ha-783738-m04\"" pod="default/busybox-58667487b6-v7x5t"
I0217 11:56:05.971509 1 schedule_one.go:1071] "Pod has been assigned to node. Abort adding it back to queue." pod="default/busybox-58667487b6-v7x5t" node="ha-783738-m04"
E0217 11:56:37.999387 1 run.go:72] "command failed" err="finished without leader elect"
==> kube-scheduler [d524d25a3256] <==
E0217 11:58:26.313559 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: Get \"https://192.168.39.249:8443/api/v1/pods?fieldSelector=status.phase%21%3DSucceeded%2Cstatus.phase%21%3DFailed&limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37922->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.313700 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: Get "https://192.168.39.249:8443/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37926->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.313773 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: Get \"https://192.168.39.249:8443/apis/storage.k8s.io/v1/csinodes?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37926->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.313906 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: Get "https://192.168.39.249:8443/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37956->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.313971 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: Get \"https://192.168.39.249:8443/api/v1/persistentvolumeclaims?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37956->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.314101 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://192.168.39.249:8443/api/v1/services?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37960->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.314185 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://192.168.39.249:8443/api/v1/services?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37960->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.314462 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: Get "https://192.168.39.249:8443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37888->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.314547 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get \"https://192.168.39.249:8443/api/v1/namespaces?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37888->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.314713 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: Get "https://192.168.39.249:8443/api/v1/replicationcontrollers?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37930->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.314798 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: Get \"https://192.168.39.249:8443/api/v1/replicationcontrollers?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37930->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.314960 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: Get "https://192.168.39.249:8443/apis/policy/v1/poddisruptionbudgets?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37948->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.315166 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: Get \"https://192.168.39.249:8443/apis/policy/v1/poddisruptionbudgets?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37948->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:26.315243 1 reflector.go:569] runtime/asm_amd64.s:1700: failed to list *v1.ConfigMap: Get "https://192.168.39.249:8443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37940->192.168.39.249:8443: read: connection reset by peer
E0217 11:58:26.315352 1 reflector.go:166] "Unhandled Error" err="runtime/asm_amd64.s:1700: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: Get \"https://192.168.39.249:8443/api/v1/namespaces/kube-system/configmaps?fieldSelector=metadata.name%3Dextension-apiserver-authentication&limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused - error from a previous attempt: read tcp 192.168.39.249:37940->192.168.39.249:8443: read: connection reset by peer" logger="UnhandledError"
W0217 11:58:29.432094 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: Get "https://192.168.39.249:8443/api/v1/persistentvolumes?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:58:29.432235 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: Get \"https://192.168.39.249:8443/api/v1/persistentvolumes?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:58:32.758441 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: Get "https://192.168.39.249:8443/apis/apps/v1/replicasets?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:58:32.758583 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: Get \"https://192.168.39.249:8443/apis/apps/v1/replicasets?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:58:33.069242 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: Get "https://192.168.39.249:8443/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:58:33.069380 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: Get \"https://192.168.39.249:8443/apis/storage.k8s.io/v1/storageclasses?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:58:35.727701 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://192.168.39.249:8443/api/v1/nodes?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:58:35.727922 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://192.168.39.249:8443/api/v1/nodes?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
W0217 11:58:36.974377 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: Get "https://192.168.39.249:8443/apis/apps/v1/statefulsets?limit=500&resourceVersion=0": dial tcp 192.168.39.249:8443: connect: connection refused
E0217 11:58:36.974419 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: Get \"https://192.168.39.249:8443/apis/apps/v1/statefulsets?limit=500&resourceVersion=0\": dial tcp 192.168.39.249:8443: connect: connection refused" logger="UnhandledError"
==> kubelet <==
Feb 17 11:58:26 ha-783738 kubelet[1591]: I0217 11:58:26.486508 1591 scope.go:117] "RemoveContainer" containerID="1683ded4f12ef91eea7067f33248f5185b17f0532a1c1480efe277bcd8accfe6"
Feb 17 11:58:26 ha-783738 kubelet[1591]: E0217 11:58:26.487506 1591 kubelet.go:3196] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"ha-783738\" not found" node="ha-783738"
Feb 17 11:58:26 ha-783738 kubelet[1591]: I0217 11:58:26.487581 1591 scope.go:117] "RemoveContainer" containerID="2e90f752fdc0601abb5401e228fa8355b97462cfd9f4dafb766f56eaf8e7b13a"
Feb 17 11:58:26 ha-783738 kubelet[1591]: E0217 11:58:26.487721 1591 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-ha-783738_kube-system(37cb2af166ca362ca24afd5a80241d47)\"" pod="kube-system/kube-controller-manager-ha-783738" podUID="37cb2af166ca362ca24afd5a80241d47"
Feb 17 11:58:26 ha-783738 kubelet[1591]: E0217 11:58:26.495193 1591 kubelet.go:3196] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"ha-783738\" not found" node="ha-783738"
Feb 17 11:58:26 ha-783738 kubelet[1591]: I0217 11:58:26.495253 1591 scope.go:117] "RemoveContainer" containerID="0d8dd6abc6b0262f0e2de062685df6bbc87187dd14023d0fd12b894f48bd2001"
Feb 17 11:58:26 ha-783738 kubelet[1591]: E0217 11:58:26.495523 1591 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-ha-783738_kube-system(77f0e47471ffa89381403ccfd101e5e7)\"" pod="kube-system/kube-apiserver-ha-783738" podUID="77f0e47471ffa89381403ccfd101e5e7"
Feb 17 11:58:26 ha-783738 kubelet[1591]: E0217 11:58:26.703334 1591 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"ha-783738\" not found"
Feb 17 11:58:27 ha-783738 kubelet[1591]: E0217 11:58:27.238622 1591 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://control-plane.minikube.internal:8443/api/v1/namespaces/default/events\": dial tcp 192.168.39.254:8443: connect: no route to host" event="&Event{ObjectMeta:{ha-783738.1824fce9ab5e06e9 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:ha-783738,UID:ha-783738,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:ha-783738,},FirstTimestamp:2025-02-17 11:57:16.604499689 +0000 UTC m=+0.220042798,LastTimestamp:2025-02-17 11:57:16.604499689 +0000 UTC m=+0.220042798,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:ha-783738,}"
Feb 17 11:58:30 ha-783738 kubelet[1591]: E0217 11:58:30.957653 1591 kubelet.go:3196] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"ha-783738\" not found" node="ha-783738"
Feb 17 11:58:30 ha-783738 kubelet[1591]: I0217 11:58:30.957784 1591 scope.go:117] "RemoveContainer" containerID="0d8dd6abc6b0262f0e2de062685df6bbc87187dd14023d0fd12b894f48bd2001"
Feb 17 11:58:30 ha-783738 kubelet[1591]: E0217 11:58:30.957928 1591 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver pod=kube-apiserver-ha-783738_kube-system(77f0e47471ffa89381403ccfd101e5e7)\"" pod="kube-system/kube-apiserver-ha-783738" podUID="77f0e47471ffa89381403ccfd101e5e7"
Feb 17 11:58:31 ha-783738 kubelet[1591]: I0217 11:58:31.169391 1591 kubelet_node_status.go:76] "Attempting to register node" node="ha-783738"
Feb 17 11:58:32 ha-783738 kubelet[1591]: E0217 11:58:32.182236 1591 kubelet.go:3196] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"ha-783738\" not found" node="ha-783738"
Feb 17 11:58:32 ha-783738 kubelet[1591]: I0217 11:58:32.182362 1591 scope.go:117] "RemoveContainer" containerID="2e90f752fdc0601abb5401e228fa8355b97462cfd9f4dafb766f56eaf8e7b13a"
Feb 17 11:58:32 ha-783738 kubelet[1591]: E0217 11:58:32.182489 1591 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-controller-manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-controller-manager pod=kube-controller-manager-ha-783738_kube-system(37cb2af166ca362ca24afd5a80241d47)\"" pod="kube-system/kube-controller-manager-ha-783738" podUID="37cb2af166ca362ca24afd5a80241d47"
Feb 17 11:58:33 ha-783738 kubelet[1591]: E0217 11:58:33.382650 1591 kubelet_node_status.go:108] "Unable to register node with API server" err="Post \"https://control-plane.minikube.internal:8443/api/v1/nodes\": dial tcp 192.168.39.254:8443: connect: no route to host" node="ha-783738"
Feb 17 11:58:33 ha-783738 kubelet[1591]: E0217 11:58:33.382815 1591 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://control-plane.minikube.internal:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/ha-783738?timeout=10s\": dial tcp 192.168.39.254:8443: connect: no route to host" interval="7s"
Feb 17 11:58:33 ha-783738 kubelet[1591]: W0217 11:58:33.382655 1591 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://control-plane.minikube.internal:8443/api/v1/nodes?fieldSelector=metadata.name%3Dha-783738&limit=500&resourceVersion=0": dial tcp 192.168.39.254:8443: connect: no route to host
Feb 17 11:58:33 ha-783738 kubelet[1591]: E0217 11:58:33.383127 1591 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://control-plane.minikube.internal:8443/api/v1/nodes?fieldSelector=metadata.name%3Dha-783738&limit=500&resourceVersion=0\": dial tcp 192.168.39.254:8443: connect: no route to host" logger="UnhandledError"
Feb 17 11:58:36 ha-783738 kubelet[1591]: E0217 11:58:36.704343 1591 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"ha-783738\" not found"
Feb 17 11:58:37 ha-783738 kubelet[1591]: E0217 11:58:37.748003 1591 kubelet.go:3196] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"ha-783738\" not found" node="ha-783738"
Feb 17 11:58:39 ha-783738 kubelet[1591]: E0217 11:58:39.526616 1591 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://control-plane.minikube.internal:8443/api/v1/namespaces/default/events\": dial tcp 192.168.39.254:8443: connect: no route to host" event="&Event{ObjectMeta:{ha-783738.1824fce9ab5e06e9 default 0 0001-01-01 00:00:00 +0000 UTC <nil> <nil> map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:ha-783738,UID:ha-783738,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:ha-783738,},FirstTimestamp:2025-02-17 11:57:16.604499689 +0000 UTC m=+0.220042798,LastTimestamp:2025-02-17 11:57:16.604499689 +0000 UTC m=+0.220042798,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:ha-783738,}"
Feb 17 11:58:39 ha-783738 kubelet[1591]: E0217 11:58:39.748034 1591 kubelet.go:3196] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"ha-783738\" not found" node="ha-783738"
Feb 17 11:58:40 ha-783738 kubelet[1591]: I0217 11:58:40.384759 1591 kubelet_node_status.go:76] "Attempting to register node" node="ha-783738"
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p ha-783738 -n ha-783738
helpers_test.go:254: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p ha-783738 -n ha-783738: exit status 2 (233.658036ms)
-- stdout --
Stopped
-- /stdout --
helpers_test.go:254: status error: exit status 2 (may be ok)
helpers_test.go:256: "ha-783738" apiserver is not running, skipping kubectl commands (state="Stopped")
--- FAIL: TestMultiControlPlane/serial/RestartCluster (112.44s)