=== RUN TestMultiControlPlane/serial/StartCluster
ha_test.go:101: (dbg) Run: out/minikube-linux-amd64 -p ha-434755 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker
E0919 22:25:08.951663 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/addons-810554/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:25.092055 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/addons-810554/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:27:52.796157 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/addons-810554/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.466420 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.472796 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.484145 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.505599 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.547020 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.628482 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:33.790028 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:34.111699 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:34.753553 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:36.035653 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:38.597484 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:43.719734 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:28:53.961414 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0919 22:29:14.443747 146335 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/functional-432755/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
ha_test.go:101: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-434755 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker: exit status 80 (5m22.640693487s)
-- stdout --
* [ha-434755] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
- MINIKUBE_LOCATION=21594
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/21594-142711/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/21594-142711/.minikube
- MINIKUBE_BIN=out/minikube-linux-amd64
- MINIKUBE_FORCE_SYSTEMD=
* Using the docker driver based on user configuration
* Using Docker driver with root privileges
* Starting "ha-434755" primary control-plane node in "ha-434755" cluster
* Pulling base image v0.0.48 ...
* Configuring CNI (Container Networking Interface) ...
- Using image gcr.io/k8s-minikube/storage-provisioner:v5
* Enabled addons: storage-provisioner, default-storageclass
* Starting "ha-434755-m02" control-plane node in "ha-434755" cluster
* Pulling base image v0.0.48 ...
* Found network options:
- NO_PROXY=192.168.49.2
- env NO_PROXY=192.168.49.2
* Verifying Kubernetes components...
* Starting "ha-434755-m03" control-plane node in "ha-434755" cluster
* Pulling base image v0.0.48 ...
* Found network options:
- NO_PROXY=192.168.49.2,192.168.49.3
- env NO_PROXY=192.168.49.2
- env NO_PROXY=192.168.49.2,192.168.49.3
* Verifying Kubernetes components...
-- /stdout --
** stderr **
I0919 22:24:21.076123 203160 out.go:360] Setting OutFile to fd 1 ...
I0919 22:24:21.076224 203160 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0919 22:24:21.076232 203160 out.go:374] Setting ErrFile to fd 2...
I0919 22:24:21.076236 203160 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0919 22:24:21.076432 203160 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21594-142711/.minikube/bin
I0919 22:24:21.076920 203160 out.go:368] Setting JSON to false
I0919 22:24:21.077711 203160 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":3997,"bootTime":1758316664,"procs":190,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1037-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0919 22:24:21.077805 203160 start.go:140] virtualization: kvm guest
I0919 22:24:21.079564 203160 out.go:179] * [ha-434755] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0919 22:24:21.080690 203160 out.go:179] - MINIKUBE_LOCATION=21594
I0919 22:24:21.080699 203160 notify.go:220] Checking for updates...
I0919 22:24:21.081753 203160 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0919 22:24:21.082865 203160 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21594-142711/kubeconfig
I0919 22:24:21.084034 203160 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21594-142711/.minikube
I0919 22:24:21.085082 203160 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0919 22:24:21.086101 203160 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0919 22:24:21.087230 203160 driver.go:421] Setting default libvirt URI to qemu:///system
I0919 22:24:21.110266 203160 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0919 22:24:21.110338 203160 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0919 22:24:21.164419 203160 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-19 22:24:21.153482571 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0919 22:24:21.164556 203160 docker.go:318] overlay module found
I0919 22:24:21.166256 203160 out.go:179] * Using the docker driver based on user configuration
I0919 22:24:21.167251 203160 start.go:304] selected driver: docker
I0919 22:24:21.167262 203160 start.go:918] validating driver "docker" against <nil>
I0919 22:24:21.167273 203160 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0919 22:24:21.167837 203160 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0919 22:24:21.218732 203160 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-19 22:24:21.209383411 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0919 22:24:21.218890 203160 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0919 22:24:21.219109 203160 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:24:21.220600 203160 out.go:179] * Using Docker driver with root privileges
I0919 22:24:21.221617 203160 cni.go:84] Creating CNI manager for ""
I0919 22:24:21.221686 203160 cni.go:136] multinode detected (0 nodes found), recommending kindnet
I0919 22:24:21.221699 203160 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0919 22:24:21.221777 203160 start.go:348] cluster config:
{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:21.222962 203160 out.go:179] * Starting "ha-434755" primary control-plane node in "ha-434755" cluster
I0919 22:24:21.223920 203160 cache.go:123] Beginning downloading kic base image for docker with docker
I0919 22:24:21.224932 203160 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:24:21.225767 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:21.225807 203160 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4
I0919 22:24:21.225817 203160 cache.go:58] Caching tarball of preloaded images
I0919 22:24:21.225855 203160 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:24:21.225956 203160 preload.go:172] Found /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:24:21.225972 203160 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0919 22:24:21.226288 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:21.226314 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json: {Name:mkebfaf58402ee5b29f1d566a094ba67c667bd07 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:21.245058 203160 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:24:21.245075 203160 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:24:21.245090 203160 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:24:21.245116 203160 start.go:360] acquireMachinesLock for ha-434755: {Name:mkbee2b246a2c7257f14e13c0a2cc8098703a645 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:24:21.245221 203160 start.go:364] duration metric: took 85.831µs to acquireMachinesLock for "ha-434755"
I0919 22:24:21.245250 203160 start.go:93] Provisioning new machine with config: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:21.245320 203160 start.go:125] createHost starting for "" (driver="docker")
I0919 22:24:21.246894 203160 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:24:21.247127 203160 start.go:159] libmachine.API.Create for "ha-434755" (driver="docker")
I0919 22:24:21.247160 203160 client.go:168] LocalClient.Create starting
I0919 22:24:21.247231 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem
I0919 22:24:21.247268 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:21.247320 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:21.247397 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem
I0919 22:24:21.247432 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:21.247449 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:21.247869 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0919 22:24:21.263071 203160 cli_runner.go:211] docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0919 22:24:21.263128 203160 network_create.go:284] running [docker network inspect ha-434755] to gather additional debugging logs...
I0919 22:24:21.263150 203160 cli_runner.go:164] Run: docker network inspect ha-434755
W0919 22:24:21.278228 203160 cli_runner.go:211] docker network inspect ha-434755 returned with exit code 1
I0919 22:24:21.278257 203160 network_create.go:287] error running [docker network inspect ha-434755]: docker network inspect ha-434755: exit status 1
stdout:
[]
stderr:
Error response from daemon: network ha-434755 not found
I0919 22:24:21.278276 203160 network_create.go:289] output of [docker network inspect ha-434755]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network ha-434755 not found
** /stderr **
I0919 22:24:21.278380 203160 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:21.293889 203160 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001a50f90}
I0919 22:24:21.293945 203160 network_create.go:124] attempt to create docker network ha-434755 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0919 22:24:21.293988 203160 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ha-434755 ha-434755
I0919 22:24:21.346619 203160 network_create.go:108] docker network ha-434755 192.168.49.0/24 created
I0919 22:24:21.346647 203160 kic.go:121] calculated static IP "192.168.49.2" for the "ha-434755" container
I0919 22:24:21.346698 203160 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:24:21.362122 203160 cli_runner.go:164] Run: docker volume create ha-434755 --label name.minikube.sigs.k8s.io=ha-434755 --label created_by.minikube.sigs.k8s.io=true
I0919 22:24:21.378481 203160 oci.go:103] Successfully created a docker volume ha-434755
I0919 22:24:21.378568 203160 cli_runner.go:164] Run: docker run --rm --name ha-434755-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755 --entrypoint /usr/bin/test -v ha-434755:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:24:21.725934 203160 oci.go:107] Successfully prepared a docker volume ha-434755
I0919 22:24:21.725988 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:21.726011 203160 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:24:21.726083 203160 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:24:25.368758 203160 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.642631223s)
I0919 22:24:25.368791 203160 kic.go:203] duration metric: took 3.642776622s to extract preloaded images to volume ...
W0919 22:24:25.368885 203160 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:24:25.368918 203160 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:24:25.368955 203160 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:24:25.420305 203160 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-434755 --name ha-434755 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-434755 --network ha-434755 --ip 192.168.49.2 --volume ha-434755:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:24:25.661250 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Running}}
I0919 22:24:25.679605 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:25.698105 203160 cli_runner.go:164] Run: docker exec ha-434755 stat /var/lib/dpkg/alternatives/iptables
I0919 22:24:25.750352 203160 oci.go:144] the created container "ha-434755" has a running status.
I0919 22:24:25.750385 203160 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa...
I0919 22:24:26.145646 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:24:26.145696 203160 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:24:26.169661 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:26.186378 203160 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:24:26.186402 203160 kic_runner.go:114] Args: [docker exec --privileged ha-434755 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:24:26.236428 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:26.253812 203160 machine.go:93] provisionDockerMachine start ...
I0919 22:24:26.253917 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:26.271856 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:26.272111 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:26.272123 203160 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:24:26.403852 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755
I0919 22:24:26.403887 203160 ubuntu.go:182] provisioning hostname "ha-434755"
I0919 22:24:26.403968 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:26.421146 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:26.421378 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:26.421391 203160 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-434755 && echo "ha-434755" | sudo tee /etc/hostname
I0919 22:24:26.565038 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755
I0919 22:24:26.565121 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:26.582234 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:26.582443 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:26.582460 203160 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-434755' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-434755/g' /etc/hosts;
else
echo '127.0.1.1 ha-434755' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:24:26.715045 203160 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:24:26.715078 203160 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-142711/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-142711/.minikube}
I0919 22:24:26.715105 203160 ubuntu.go:190] setting up certificates
I0919 22:24:26.715115 203160 provision.go:84] configureAuth start
I0919 22:24:26.715165 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755
I0919 22:24:26.732003 203160 provision.go:143] copyHostCerts
I0919 22:24:26.732039 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:26.732068 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem, removing ...
I0919 22:24:26.732077 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:26.732143 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem (1123 bytes)
I0919 22:24:26.732228 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:26.732246 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem, removing ...
I0919 22:24:26.732250 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:26.732275 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem (1675 bytes)
I0919 22:24:26.732321 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:26.732338 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem, removing ...
I0919 22:24:26.732344 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:26.732367 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem (1078 bytes)
I0919 22:24:26.732417 203160 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem org=jenkins.ha-434755 san=[127.0.0.1 192.168.49.2 ha-434755 localhost minikube]
I0919 22:24:27.341034 203160 provision.go:177] copyRemoteCerts
I0919 22:24:27.341097 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:24:27.341134 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.360598 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:27.455483 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:24:27.455564 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0919 22:24:27.480468 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:24:27.480525 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0919 22:24:27.503241 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:24:27.503287 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0919 22:24:27.525743 203160 provision.go:87] duration metric: took 810.613663ms to configureAuth
I0919 22:24:27.525768 203160 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:24:27.525921 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:27.525973 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.542866 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:27.543066 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:27.543078 203160 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0919 22:24:27.675714 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0919 22:24:27.675740 203160 ubuntu.go:71] root file system type: overlay
I0919 22:24:27.675838 203160 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0919 22:24:27.675893 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.693429 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:27.693693 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:27.693798 203160 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0919 22:24:27.843188 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0919 22:24:27.843285 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.860458 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:27.860715 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:27.860742 203160 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0919 22:24:28.937239 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-19 22:24:27.840752975 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0919 22:24:28.937277 203160 machine.go:96] duration metric: took 2.683443018s to provisionDockerMachine
I0919 22:24:28.937292 203160 client.go:171] duration metric: took 7.690121191s to LocalClient.Create
I0919 22:24:28.937318 203160 start.go:167] duration metric: took 7.690191518s to libmachine.API.Create "ha-434755"
I0919 22:24:28.937332 203160 start.go:293] postStartSetup for "ha-434755" (driver="docker")
I0919 22:24:28.937346 203160 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:24:28.937417 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:24:28.937468 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:28.955631 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.052278 203160 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:24:29.055474 203160 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:24:29.055519 203160 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:24:29.055533 203160 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:24:29.055541 203160 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:24:29.055555 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/addons for local assets ...
I0919 22:24:29.055607 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/files for local assets ...
I0919 22:24:29.055697 203160 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> 1463352.pem in /etc/ssl/certs
I0919 22:24:29.055708 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /etc/ssl/certs/1463352.pem
I0919 22:24:29.055792 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:24:29.064211 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:29.088887 203160 start.go:296] duration metric: took 151.540336ms for postStartSetup
I0919 22:24:29.089170 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755
I0919 22:24:29.106927 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:29.107156 203160 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:24:29.107207 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:29.123683 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.214129 203160 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:24:29.218338 203160 start.go:128] duration metric: took 7.973004208s to createHost
I0919 22:24:29.218360 203160 start.go:83] releasing machines lock for "ha-434755", held for 7.973124739s
I0919 22:24:29.218412 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755
I0919 22:24:29.236040 203160 ssh_runner.go:195] Run: cat /version.json
I0919 22:24:29.236081 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:29.236126 203160 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:24:29.236195 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:29.253449 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.253827 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.414344 203160 ssh_runner.go:195] Run: systemctl --version
I0919 22:24:29.418771 203160 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:24:29.423119 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:24:29.450494 203160 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:24:29.450577 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:24:29.475768 203160 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:24:29.475797 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:29.475832 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:29.475949 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:29.491395 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:24:29.501756 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:24:29.511013 203160 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:24:29.511066 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:24:29.520269 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:29.529232 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:24:29.538263 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:29.547175 203160 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:24:29.555699 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:24:29.564644 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:24:29.573613 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:24:29.582664 203160 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:24:29.590362 203160 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:24:29.598040 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:29.662901 203160 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0919 22:24:29.737694 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:29.737750 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:29.737804 203160 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0919 22:24:29.750261 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:29.761088 203160 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0919 22:24:29.781368 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:29.792667 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:24:29.803679 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:29.819981 203160 ssh_runner.go:195] Run: which cri-dockerd
I0919 22:24:29.823528 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0919 22:24:29.833551 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0919 22:24:29.851373 203160 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0919 22:24:29.919426 203160 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0919 22:24:29.982907 203160 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0919 22:24:29.983042 203160 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0919 22:24:30.001192 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0919 22:24:30.012142 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:30.077304 203160 ssh_runner.go:195] Run: sudo systemctl restart docker
I0919 22:24:30.841187 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:24:30.852558 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0919 22:24:30.863819 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:30.874629 203160 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0919 22:24:30.936849 203160 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0919 22:24:30.998282 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:31.059613 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0919 22:24:31.085894 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0919 22:24:31.097613 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:31.165516 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0919 22:24:31.237651 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:31.250126 203160 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0919 22:24:31.250193 203160 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0919 22:24:31.253768 203160 start.go:563] Will wait 60s for crictl version
I0919 22:24:31.253815 203160 ssh_runner.go:195] Run: which crictl
I0919 22:24:31.257175 203160 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:24:31.291330 203160 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0919 22:24:31.291400 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:31.316224 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:31.343571 203160 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0919 22:24:31.343639 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:31.360312 203160 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:24:31.364394 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:31.376325 203160 kubeadm.go:875] updating cluster {Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIP
s:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath:
SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0919 22:24:31.376429 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:31.376472 203160 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0919 22:24:31.396685 203160 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0919 22:24:31.396706 203160 docker.go:621] Images already preloaded, skipping extraction
I0919 22:24:31.396777 203160 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0919 22:24:31.417311 203160 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0919 22:24:31.417334 203160 cache_images.go:85] Images are preloaded, skipping loading
I0919 22:24:31.417348 203160 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 docker true true} ...
I0919 22:24:31.417454 203160 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-434755 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:24:31.417533 203160 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0919 22:24:31.468906 203160 cni.go:84] Creating CNI manager for ""
I0919 22:24:31.468934 203160 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0919 22:24:31.468949 203160 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0919 22:24:31.468980 203160 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-434755 NodeName:ha-434755 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/man
ifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0919 22:24:31.469131 203160 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "ha-434755"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0919 22:24:31.469170 203160 kube-vip.go:115] generating kube-vip config ...
I0919 22:24:31.469222 203160 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:24:31.481888 203160 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:24:31.481979 203160 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/super-admin.conf"
name: kubeconfig
status: {}
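The manifest above runs kube-vip as a static pod: vip_arp and vip_interface advertise the VIP 192.168.49.254 on eth0, the plndr-cp-lock Lease handles leader election, and port 8443 is fronted so later kubeconfigs can target https://192.168.49.254:8443. A hedged way to confirm the VIP once the node is up (commands assumed, not taken from this log; kubectl is assumed to point at this cluster):

  docker exec ha-434755 cat /etc/kubernetes/manifests/kube-vip.yaml | head
  kubectl -n kube-system get pods | grep kube-vip
  ping -c 1 192.168.49.254   # ARP-advertised VIP should answer once kube-vip holds the lease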
I0919 22:24:31.482024 203160 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:24:31.490896 203160 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:24:31.490954 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0919 22:24:31.499752 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (308 bytes)
I0919 22:24:31.517642 203160 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:24:31.535661 203160 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2209 bytes)
I0919 22:24:31.552926 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1364 bytes)
I0919 22:24:31.572177 203160 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:24:31.575892 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
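The pair of commands above injects a control-plane.minikube.internal entry for the VIP into the node's /etc/hosts, mirroring the earlier host.minikube.internal entry. Expected resulting line (illustrative):

  $ grep control-plane.minikube.internal /etc/hosts
  192.168.49.254 control-plane.minikube.internal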
I0919 22:24:31.587094 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:31.654039 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:31.678017 203160 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755 for IP: 192.168.49.2
I0919 22:24:31.678046 203160 certs.go:194] generating shared ca certs ...
I0919 22:24:31.678070 203160 certs.go:226] acquiring lock for ca certs: {Name:mkc5df652d6204fd8687dfaaf83b02c6e10b58b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:31.678228 203160 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key
I0919 22:24:31.678271 203160 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key
I0919 22:24:31.678281 203160 certs.go:256] generating profile certs ...
I0919 22:24:31.678337 203160 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key
I0919 22:24:31.678354 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt with IP's: []
I0919 22:24:31.857665 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt ...
I0919 22:24:31.857696 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt: {Name:mk7ec51226de11d757f14966ffd43a2037698787 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:31.857881 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key ...
I0919 22:24:31.857892 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key: {Name:mkf584fffef919693714a07e5a88b44eca7219c8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:31.857971 203160 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8
I0919 22:24:31.857986 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.254]
I0919 22:24:32.133506 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8 ...
I0919 22:24:32.133540 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8: {Name:mkb81ce84ef58bc410b7449c932fc5a925016309 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.133711 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8 ...
I0919 22:24:32.133729 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8: {Name:mk079553ff6e398f68775f47e1ad8c0a1a64a140 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.133803 203160 certs.go:381] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt
I0919 22:24:32.133908 203160 certs.go:385] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key
I0919 22:24:32.133973 203160 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key
I0919 22:24:32.133989 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt with IP's: []
I0919 22:24:32.385885 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt ...
I0919 22:24:32.385919 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt: {Name:mk3bec5b301362978b2b3b81fd3c21d3f704e1cb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.386084 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key ...
I0919 22:24:32.386097 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key: {Name:mk9670132fab0c6814f19a454e4e08b86e71aeae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.386174 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:24:32.386207 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:24:32.386221 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:24:32.386234 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:24:32.386246 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:24:32.386271 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:24:32.386283 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:24:32.386292 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:24:32.386341 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem (1338 bytes)
W0919 22:24:32.386378 203160 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335_empty.pem, impossibly tiny 0 bytes
I0919 22:24:32.386388 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem (1675 bytes)
I0919 22:24:32.386418 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem (1078 bytes)
I0919 22:24:32.386443 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem (1123 bytes)
I0919 22:24:32.386467 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem (1675 bytes)
I0919 22:24:32.386517 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:32.386548 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.386562 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.386574 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem -> /usr/share/ca-certificates/146335.pem
I0919 22:24:32.387195 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:24:32.413179 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:24:32.437860 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:24:32.462719 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:24:32.488640 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0919 22:24:32.513281 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0919 22:24:32.536826 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:24:32.559540 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:24:32.582215 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /usr/share/ca-certificates/1463352.pem (1708 bytes)
I0919 22:24:32.607378 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:24:32.629686 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem --> /usr/share/ca-certificates/146335.pem (1338 bytes)
I0919 22:24:32.651946 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0919 22:24:32.668687 203160 ssh_runner.go:195] Run: openssl version
I0919 22:24:32.673943 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:24:32.683156 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.686577 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.686633 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.693223 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:24:32.702177 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/146335.pem && ln -fs /usr/share/ca-certificates/146335.pem /etc/ssl/certs/146335.pem"
I0919 22:24:32.711521 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/146335.pem
I0919 22:24:32.714732 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/146335.pem
I0919 22:24:32.714766 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/146335.pem
I0919 22:24:32.721219 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/146335.pem /etc/ssl/certs/51391683.0"
I0919 22:24:32.730116 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1463352.pem && ln -fs /usr/share/ca-certificates/1463352.pem /etc/ssl/certs/1463352.pem"
I0919 22:24:32.739018 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.742287 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.742330 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.748703 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/1463352.pem /etc/ssl/certs/3ec20f2e.0"
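The openssl x509 -hash and ln -fs steps above rebuild the c_rehash-style symlinks OpenSSL uses to locate trusted CAs by subject hash; for example, the b5213941.0 link created earlier resolves back to minikubeCA.pem. Illustrative check (not part of the log):

  openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941
  ls -l /etc/ssl/certs/b5213941.0   # symlink -> /etc/ssl/certs/minikubeCA.pem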
I0919 22:24:32.757370 203160 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:24:32.760542 203160 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:24:32.760590 203160 kubeadm.go:392] StartCluster: {Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[
] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: So
cketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:32.760710 203160 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0919 22:24:32.778911 203160 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0919 22:24:32.787673 203160 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0919 22:24:32.796245 203160 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0919 22:24:32.796280 203160 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0919 22:24:32.804896 203160 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0919 22:24:32.804909 203160 kubeadm.go:157] found existing configuration files:
I0919 22:24:32.804937 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0919 22:24:32.813189 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0919 22:24:32.813229 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0919 22:24:32.821160 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0919 22:24:32.829194 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0919 22:24:32.829245 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0919 22:24:32.837031 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0919 22:24:32.845106 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0919 22:24:32.845150 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0919 22:24:32.853133 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0919 22:24:32.861349 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0919 22:24:32.861390 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0919 22:24:32.869355 203160 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0919 22:24:32.905932 203160 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0919 22:24:32.906264 203160 kubeadm.go:310] [preflight] Running pre-flight checks
I0919 22:24:32.922979 203160 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0919 22:24:32.923110 203160 kubeadm.go:310] KERNEL_VERSION: 6.8.0-1037-gcp
I0919 22:24:32.923168 203160 kubeadm.go:310] OS: Linux
I0919 22:24:32.923231 203160 kubeadm.go:310] CGROUPS_CPU: enabled
I0919 22:24:32.923291 203160 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0919 22:24:32.923361 203160 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0919 22:24:32.923426 203160 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0919 22:24:32.923486 203160 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0919 22:24:32.923570 203160 kubeadm.go:310] CGROUPS_PIDS: enabled
I0919 22:24:32.923633 203160 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0919 22:24:32.923686 203160 kubeadm.go:310] CGROUPS_IO: enabled
I0919 22:24:32.975656 203160 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0919 22:24:32.975772 203160 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0919 22:24:32.975923 203160 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0919 22:24:32.987123 203160 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0919 22:24:32.990614 203160 out.go:252] - Generating certificates and keys ...
I0919 22:24:32.990701 203160 kubeadm.go:310] [certs] Using existing ca certificate authority
I0919 22:24:32.990790 203160 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0919 22:24:33.305563 203160 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0919 22:24:33.403579 203160 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0919 22:24:33.794985 203160 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0919 22:24:33.939882 203160 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0919 22:24:34.319905 203160 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0919 22:24:34.320050 203160 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [ha-434755 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0919 22:24:34.571803 203160 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0919 22:24:34.572036 203160 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [ha-434755 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0919 22:24:34.785683 203160 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0919 22:24:34.913179 203160 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0919 22:24:35.193757 203160 kubeadm.go:310] [certs] Generating "sa" key and public key
I0919 22:24:35.193908 203160 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0919 22:24:35.269921 203160 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0919 22:24:35.432895 203160 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0919 22:24:35.889148 203160 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0919 22:24:36.099682 203160 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0919 22:24:36.370632 203160 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0919 22:24:36.371101 203160 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0919 22:24:36.373221 203160 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0919 22:24:36.375010 203160 out.go:252] - Booting up control plane ...
I0919 22:24:36.375112 203160 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0919 22:24:36.375205 203160 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0919 22:24:36.375823 203160 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0919 22:24:36.385552 203160 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0919 22:24:36.385660 203160 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0919 22:24:36.391155 203160 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0919 22:24:36.391446 203160 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0919 22:24:36.391516 203160 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0919 22:24:36.469169 203160 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0919 22:24:36.469341 203160 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0919 22:24:37.470960 203160 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001771868s
I0919 22:24:37.475271 203160 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0919 22:24:37.475402 203160 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0919 22:24:37.475560 203160 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0919 22:24:37.475683 203160 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0919 22:24:38.691996 203160 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 1.216651105s
I0919 22:24:39.748252 203160 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 2.272903249s
I0919 22:24:43.641652 203160 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 6.166322635s
I0919 22:24:43.652285 203160 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0919 22:24:43.662136 203160 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0919 22:24:43.670817 203160 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0919 22:24:43.671109 203160 kubeadm.go:310] [mark-control-plane] Marking the node ha-434755 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0919 22:24:43.678157 203160 kubeadm.go:310] [bootstrap-token] Using token: g87idd.cyuzs8jougdixinx
I0919 22:24:43.679741 203160 out.go:252] - Configuring RBAC rules ...
I0919 22:24:43.679886 203160 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0919 22:24:43.685914 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0919 22:24:43.691061 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0919 22:24:43.693550 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0919 22:24:43.697628 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0919 22:24:43.699973 203160 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0919 22:24:44.047466 203160 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0919 22:24:44.461485 203160 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0919 22:24:45.047812 203160 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0919 22:24:45.048594 203160 kubeadm.go:310]
I0919 22:24:45.048685 203160 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0919 22:24:45.048725 203160 kubeadm.go:310]
I0919 22:24:45.048861 203160 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0919 22:24:45.048871 203160 kubeadm.go:310]
I0919 22:24:45.048906 203160 kubeadm.go:310] mkdir -p $HOME/.kube
I0919 22:24:45.049005 203160 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0919 22:24:45.049058 203160 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0919 22:24:45.049064 203160 kubeadm.go:310]
I0919 22:24:45.049110 203160 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0919 22:24:45.049131 203160 kubeadm.go:310]
I0919 22:24:45.049219 203160 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0919 22:24:45.049232 203160 kubeadm.go:310]
I0919 22:24:45.049278 203160 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0919 22:24:45.049339 203160 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0919 22:24:45.049394 203160 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0919 22:24:45.049400 203160 kubeadm.go:310]
I0919 22:24:45.049474 203160 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0919 22:24:45.049614 203160 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0919 22:24:45.049627 203160 kubeadm.go:310]
I0919 22:24:45.049721 203160 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token g87idd.cyuzs8jougdixinx \
I0919 22:24:45.049859 203160 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a \
I0919 22:24:45.049895 203160 kubeadm.go:310] --control-plane
I0919 22:24:45.049904 203160 kubeadm.go:310]
I0919 22:24:45.050015 203160 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0919 22:24:45.050028 203160 kubeadm.go:310]
I0919 22:24:45.050110 203160 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token g87idd.cyuzs8jougdixinx \
I0919 22:24:45.050212 203160 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a
I0919 22:24:45.053328 203160 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1037-gcp\n", err: exit status 1
I0919 22:24:45.053440 203160 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0919 22:24:45.053459 203160 cni.go:84] Creating CNI manager for ""
I0919 22:24:45.053466 203160 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0919 22:24:45.054970 203160 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0919 22:24:45.056059 203160 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0919 22:24:45.060192 203160 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0919 22:24:45.060207 203160 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0919 22:24:45.078671 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
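Because the cluster was detected as multinode, the manifest applied here is kindnet. A hedged follow-up check, assuming kindnet lands in kube-system as in minikube's default layout (not shown in this log):

  kubectl -n kube-system get pods -o wide | grep kindnet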
I0919 22:24:45.281468 203160 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0919 22:24:45.281585 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:45.281587 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-434755 minikube.k8s.io/updated_at=2025_09_19T22_24_45_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-434755 minikube.k8s.io/primary=true
I0919 22:24:45.374035 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:45.378242 203160 ops.go:34] apiserver oom_adj: -16
I0919 22:24:45.874252 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:46.375078 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:46.874791 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:46.939251 203160 kubeadm.go:1105] duration metric: took 1.657752945s to wait for elevateKubeSystemPrivileges
I0919 22:24:46.939292 203160 kubeadm.go:394] duration metric: took 14.17870588s to StartCluster
I0919 22:24:46.939313 203160 settings.go:142] acquiring lock: {Name:mk0ff94a55db11c0f045ab7f983bc46c653527ba Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:46.939381 203160 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21594-142711/kubeconfig
I0919 22:24:46.940075 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/kubeconfig: {Name:mk4ed26fa289682c072e02c721ecb5e9a371ed27 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:46.940315 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0919 22:24:46.940328 203160 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:46.940349 203160 start.go:241] waiting for startup goroutines ...
I0919 22:24:46.940375 203160 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0919 22:24:46.940455 203160 addons.go:69] Setting storage-provisioner=true in profile "ha-434755"
I0919 22:24:46.940480 203160 addons.go:69] Setting default-storageclass=true in profile "ha-434755"
I0919 22:24:46.940526 203160 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ha-434755"
I0919 22:24:46.940484 203160 addons.go:238] Setting addon storage-provisioner=true in "ha-434755"
I0919 22:24:46.940592 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:46.940622 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:46.940889 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:46.941141 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:46.961198 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0919 22:24:46.961822 203160 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0919 22:24:46.961843 203160 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0919 22:24:46.961849 203160 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0919 22:24:46.961854 203160 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0919 22:24:46.961858 203160 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0919 22:24:46.961927 203160 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I0919 22:24:46.962245 203160 addons.go:238] Setting addon default-storageclass=true in "ha-434755"
I0919 22:24:46.962289 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:46.962659 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:46.962840 203160 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0919 22:24:46.964064 203160 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0919 22:24:46.964085 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0919 22:24:46.964143 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:46.980987 203160 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0919 22:24:46.981012 203160 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0919 22:24:46.981083 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:46.985677 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:46.998945 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:47.020097 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0919 22:24:47.098011 203160 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0919 22:24:47.110913 203160 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0919 22:24:47.173952 203160 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0919 22:24:47.362290 203160 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0919 22:24:47.363580 203160 addons.go:514] duration metric: took 423.211287ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0919 22:24:47.363630 203160 start.go:246] waiting for cluster config update ...
I0919 22:24:47.363647 203160 start.go:255] writing updated cluster config ...
I0919 22:24:47.364969 203160 out.go:203]
I0919 22:24:47.366064 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:47.366127 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:47.367471 203160 out.go:179] * Starting "ha-434755-m02" control-plane node in "ha-434755" cluster
I0919 22:24:47.368387 203160 cache.go:123] Beginning downloading kic base image for docker with docker
I0919 22:24:47.369440 203160 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:24:47.370378 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:47.370397 203160 cache.go:58] Caching tarball of preloaded images
I0919 22:24:47.370461 203160 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:24:47.370513 203160 preload.go:172] Found /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:24:47.370529 203160 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0919 22:24:47.370620 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:47.391559 203160 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:24:47.391581 203160 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:24:47.391603 203160 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:24:47.391635 203160 start.go:360] acquireMachinesLock for ha-434755-m02: {Name:mk9ca5ab09eecc208a09b7d4c6860cdbcbbd1861 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:24:47.391801 203160 start.go:364] duration metric: took 141.515µs to acquireMachinesLock for "ha-434755-m02"
I0919 22:24:47.391835 203160 start.go:93] Provisioning new machine with config: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[]
MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:47.391926 203160 start.go:125] createHost starting for "m02" (driver="docker")
I0919 22:24:47.393797 203160 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:24:47.393909 203160 start.go:159] libmachine.API.Create for "ha-434755" (driver="docker")
I0919 22:24:47.393934 203160 client.go:168] LocalClient.Create starting
I0919 22:24:47.393999 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem
I0919 22:24:47.394037 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:47.394072 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:47.394137 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem
I0919 22:24:47.394163 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:47.394178 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:47.394368 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:47.411751 203160 network_create.go:77] Found existing network {name:ha-434755 subnet:0xc0016fd680 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0919 22:24:47.411805 203160 kic.go:121] calculated static IP "192.168.49.3" for the "ha-434755-m02" container
I0919 22:24:47.411877 203160 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:24:47.428826 203160 cli_runner.go:164] Run: docker volume create ha-434755-m02 --label name.minikube.sigs.k8s.io=ha-434755-m02 --label created_by.minikube.sigs.k8s.io=true
I0919 22:24:47.446551 203160 oci.go:103] Successfully created a docker volume ha-434755-m02
I0919 22:24:47.446629 203160 cli_runner.go:164] Run: docker run --rm --name ha-434755-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m02 --entrypoint /usr/bin/test -v ha-434755-m02:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:24:47.837811 203160 oci.go:107] Successfully prepared a docker volume ha-434755-m02
I0919 22:24:47.837861 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:47.837884 203160 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:24:47.837943 203160 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:24:51.165942 203160 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.327954443s)
I0919 22:24:51.165985 203160 kic.go:203] duration metric: took 3.328094858s to extract preloaded images to volume ...
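The preloaded image tarball is unpacked into the ha-434755-m02 volume, which is later mounted at /var inside the node container so dockerd starts with the Kubernetes images already in its store. A quick spot-check of the extracted layout (a sketch, assuming a local alpine image is available; not part of the test run) would be:
  docker run --rm -v ha-434755-m02:/var alpine ls /var/lib/docker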
W0919 22:24:51.166081 203160 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:24:51.166111 203160 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:24:51.166151 203160 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:24:51.222283 203160 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-434755-m02 --name ha-434755-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-434755-m02 --network ha-434755 --ip 192.168.49.3 --volume ha-434755-m02:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:24:51.469867 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Running}}
I0919 22:24:51.487954 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Status}}
I0919 22:24:51.506846 203160 cli_runner.go:164] Run: docker exec ha-434755-m02 stat /var/lib/dpkg/alternatives/iptables
I0919 22:24:51.559220 203160 oci.go:144] the created container "ha-434755-m02" has a running status.
I0919 22:24:51.559254 203160 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa...
I0919 22:24:51.766973 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:24:51.767017 203160 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:24:51.797620 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Status}}
I0919 22:24:51.823671 203160 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:24:51.823693 203160 kic_runner.go:114] Args: [docker exec --privileged ha-434755-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:24:51.878635 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Status}}
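Each published container port (22, 2376, 8443, ...) is bound to an ephemeral port on 127.0.0.1; the lines below resolve the SSH port (32788) by inspecting the 22/tcp mapping. An equivalent manual lookup (sketch) is:
  docker port ha-434755-m02 22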
I0919 22:24:51.902762 203160 machine.go:93] provisionDockerMachine start ...
I0919 22:24:51.902873 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:51.926268 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:51.926707 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:51.926729 203160 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:24:52.076154 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m02
I0919 22:24:52.076188 203160 ubuntu.go:182] provisioning hostname "ha-434755-m02"
I0919 22:24:52.076259 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:52.099415 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:52.099841 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:52.099873 203160 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-434755-m02 && echo "ha-434755-m02" | sudo tee /etc/hostname
I0919 22:24:52.261548 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m02
I0919 22:24:52.261646 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:52.283406 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:52.283734 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:52.283754 203160 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-434755-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-434755-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-434755-m02' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:24:52.428353 203160 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:24:52.428390 203160 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-142711/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-142711/.minikube}
I0919 22:24:52.428420 203160 ubuntu.go:190] setting up certificates
I0919 22:24:52.428441 203160 provision.go:84] configureAuth start
I0919 22:24:52.428536 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m02
I0919 22:24:52.450885 203160 provision.go:143] copyHostCerts
I0919 22:24:52.450924 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:52.450961 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem, removing ...
I0919 22:24:52.450971 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:52.451027 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem (1078 bytes)
I0919 22:24:52.451115 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:52.451140 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem, removing ...
I0919 22:24:52.451145 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:52.451185 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem (1123 bytes)
I0919 22:24:52.451248 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:52.451272 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem, removing ...
I0919 22:24:52.451276 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:52.451301 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem (1675 bytes)
I0919 22:24:52.451355 203160 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem org=jenkins.ha-434755-m02 san=[127.0.0.1 192.168.49.3 ha-434755-m02 localhost minikube]
I0919 22:24:52.822893 203160 provision.go:177] copyRemoteCerts
I0919 22:24:52.822975 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:24:52.823015 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:52.844478 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:52.949460 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:24:52.949550 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0919 22:24:52.985521 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:24:52.985590 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0919 22:24:53.015276 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:24:53.015359 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0919 22:24:53.043799 203160 provision.go:87] duration metric: took 615.336421ms to configureAuth
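configureAuth generated a Docker TLS server certificate whose SANs cover this node's address and names (127.0.0.1, 192.168.49.3, ha-434755-m02, localhost, minikube). The SANs on the host-side copy could be confirmed with something like the following (sketch, assuming openssl is installed on the CI host):
  openssl x509 -in /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -noout -text | grep -A1 'Subject Alternative Name'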
I0919 22:24:53.043834 203160 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:24:53.044042 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:53.044098 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:53.065294 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:53.065671 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:53.065691 203160 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0919 22:24:53.203158 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0919 22:24:53.203193 203160 ubuntu.go:71] root file system type: overlay
I0919 22:24:53.203308 203160 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0919 22:24:53.203367 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:53.220915 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:53.221235 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:53.221346 203160 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment="NO_PROXY=192.168.49.2"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0919 22:24:53.374632 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment=NO_PROXY=192.168.49.2
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0919 22:24:53.374713 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:53.392460 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:53.392706 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:53.392731 203160 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0919 22:24:54.550785 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-19 22:24:53.372388319 +0000
@@ -9,23 +9,35 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+Environment=NO_PROXY=192.168.49.2
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
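The unit file written above uses the standard systemd pattern for overriding a service that already defines ExecStart: an empty ExecStart= clears the inherited command, then the full dockerd command line (TLS flags plus the insecure registry for the service CIDR) is set before daemon-reload and restart. The unit the node actually runs could be inspected with (sketch):
  docker exec ha-434755-m02 systemctl cat docker.service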
I0919 22:24:54.550828 203160 machine.go:96] duration metric: took 2.648042096s to provisionDockerMachine
I0919 22:24:54.550847 203160 client.go:171] duration metric: took 7.156901293s to LocalClient.Create
I0919 22:24:54.550877 203160 start.go:167] duration metric: took 7.156965929s to libmachine.API.Create "ha-434755"
I0919 22:24:54.550892 203160 start.go:293] postStartSetup for "ha-434755-m02" (driver="docker")
I0919 22:24:54.550905 203160 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:24:54.550979 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:24:54.551047 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.573731 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:54.676450 203160 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:24:54.680626 203160 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:24:54.680660 203160 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:24:54.680669 203160 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:24:54.680678 203160 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:24:54.680695 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/addons for local assets ...
I0919 22:24:54.680757 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/files for local assets ...
I0919 22:24:54.680849 203160 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> 1463352.pem in /etc/ssl/certs
I0919 22:24:54.680863 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /etc/ssl/certs/1463352.pem
I0919 22:24:54.680970 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:24:54.691341 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:54.722119 203160 start.go:296] duration metric: took 171.208879ms for postStartSetup
I0919 22:24:54.722583 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m02
I0919 22:24:54.743611 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:54.743848 203160 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:24:54.743887 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.765985 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:54.864692 203160 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:24:54.870738 203160 start.go:128] duration metric: took 7.478790821s to createHost
I0919 22:24:54.870767 203160 start.go:83] releasing machines lock for "ha-434755-m02", held for 7.478950053s
I0919 22:24:54.870847 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m02
I0919 22:24:54.898999 203160 out.go:179] * Found network options:
I0919 22:24:54.900212 203160 out.go:179] - NO_PROXY=192.168.49.2
W0919 22:24:54.901275 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:24:54.901331 203160 proxy.go:120] fail to check proxy env: Error ip not in block
I0919 22:24:54.901436 203160 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:24:54.901515 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.901712 203160 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:24:54.901788 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.923297 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:54.924737 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:55.020889 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:24:55.117431 203160 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:24:55.117543 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:24:55.154058 203160 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:24:55.154092 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:55.154128 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:55.154249 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:55.171125 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:24:55.182699 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:24:55.193910 203160 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:24:55.193981 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:24:55.206930 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:55.218445 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:24:55.229676 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:55.239797 203160 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:24:55.249561 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:24:55.261388 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:24:55.272063 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:24:55.285133 203160 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:24:55.294764 203160 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:24:55.304309 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:55.385891 203160 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0919 22:24:55.483649 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:55.483704 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:55.483771 203160 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0919 22:24:55.498112 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:55.511999 203160 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0919 22:24:55.531010 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:55.547951 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:24:55.562055 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:55.582950 203160 ssh_runner.go:195] Run: which cri-dockerd
I0919 22:24:55.588111 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0919 22:24:55.600129 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0919 22:24:55.622263 203160 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0919 22:24:55.715078 203160 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0919 22:24:55.798019 203160 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0919 22:24:55.798075 203160 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0919 22:24:55.821473 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0919 22:24:55.835550 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:55.921379 203160 ssh_runner.go:195] Run: sudo systemctl restart docker
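docker.go:575 rewrites /etc/docker/daemon.json so Docker's cgroup driver matches the "systemd" driver detected on the host, then restarts the service. The effective driver inside the node could be verified with (sketch):
  docker exec ha-434755-m02 docker info --format '{{.CgroupDriver}}'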
I0919 22:24:56.663040 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:24:56.676296 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0919 22:24:56.691640 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:56.705621 203160 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0919 22:24:56.790623 203160 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0919 22:24:56.868190 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:56.965154 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0919 22:24:56.986139 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0919 22:24:56.999297 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:57.084263 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0919 22:24:57.171144 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:57.185630 203160 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0919 22:24:57.185700 203160 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0919 22:24:57.190173 203160 start.go:563] Will wait 60s for crictl version
I0919 22:24:57.190233 203160 ssh_runner.go:195] Run: which crictl
I0919 22:24:57.194000 203160 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:24:57.238791 203160 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0919 22:24:57.238870 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:57.271275 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:57.304909 203160 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0919 22:24:57.306146 203160 out.go:179] - env NO_PROXY=192.168.49.2
I0919 22:24:57.307257 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:57.328319 203160 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:24:57.333877 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:57.348827 203160 mustload.go:65] Loading cluster: ha-434755
I0919 22:24:57.349095 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:57.349417 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:57.372031 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:57.372263 203160 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755 for IP: 192.168.49.3
I0919 22:24:57.372273 203160 certs.go:194] generating shared ca certs ...
I0919 22:24:57.372289 203160 certs.go:226] acquiring lock for ca certs: {Name:mkc5df652d6204fd8687dfaaf83b02c6e10b58b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:57.372399 203160 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key
I0919 22:24:57.372434 203160 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key
I0919 22:24:57.372443 203160 certs.go:256] generating profile certs ...
I0919 22:24:57.372523 203160 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key
I0919 22:24:57.372551 203160 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57
I0919 22:24:57.372569 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.254]
I0919 22:24:57.438372 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57 ...
I0919 22:24:57.438407 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57: {Name:mk30b073ffbf49812fc1c5fc78a448cc1824100f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:57.438643 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57 ...
I0919 22:24:57.438666 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57: {Name:mk59c79ca511caeebb332978950944f46d4ce354 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:57.438796 203160 certs.go:381] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt
I0919 22:24:57.438979 203160 certs.go:385] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key
I0919 22:24:57.439158 203160 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key
I0919 22:24:57.439184 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:24:57.439202 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:24:57.439220 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:24:57.439238 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:24:57.439256 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:24:57.439273 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:24:57.439294 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:24:57.439312 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:24:57.439376 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem (1338 bytes)
W0919 22:24:57.439458 203160 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335_empty.pem, impossibly tiny 0 bytes
I0919 22:24:57.439474 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem (1675 bytes)
I0919 22:24:57.439537 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem (1078 bytes)
I0919 22:24:57.439573 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem (1123 bytes)
I0919 22:24:57.439608 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem (1675 bytes)
I0919 22:24:57.439670 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:57.439716 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /usr/share/ca-certificates/1463352.pem
I0919 22:24:57.439743 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:57.439759 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem -> /usr/share/ca-certificates/146335.pem
I0919 22:24:57.439830 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:57.462047 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:57.557856 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0919 22:24:57.562525 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0919 22:24:57.578095 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0919 22:24:57.582466 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0919 22:24:57.599559 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0919 22:24:57.603627 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0919 22:24:57.618994 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0919 22:24:57.622912 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0919 22:24:57.638660 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0919 22:24:57.643248 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0919 22:24:57.660006 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0919 22:24:57.664313 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0919 22:24:57.680744 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:24:57.714036 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:24:57.747544 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:24:57.780943 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:24:57.812353 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0919 22:24:57.845693 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0919 22:24:57.878130 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:24:57.911308 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:24:57.946218 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /usr/share/ca-certificates/1463352.pem (1708 bytes)
I0919 22:24:57.984297 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:24:58.017177 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem --> /usr/share/ca-certificates/146335.pem (1338 bytes)
I0919 22:24:58.049420 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0919 22:24:58.073963 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0919 22:24:58.097887 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0919 22:24:58.122255 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0919 22:24:58.147967 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0919 22:24:58.171849 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0919 22:24:58.195690 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0919 22:24:58.219698 203160 ssh_runner.go:195] Run: openssl version
I0919 22:24:58.227264 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1463352.pem && ln -fs /usr/share/ca-certificates/1463352.pem /etc/ssl/certs/1463352.pem"
I0919 22:24:58.240247 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1463352.pem
I0919 22:24:58.244702 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/1463352.pem
I0919 22:24:58.244768 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1463352.pem
I0919 22:24:58.254189 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/1463352.pem /etc/ssl/certs/3ec20f2e.0"
I0919 22:24:58.265745 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:24:58.279180 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:58.284030 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:58.284084 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:58.292591 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:24:58.305819 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/146335.pem && ln -fs /usr/share/ca-certificates/146335.pem /etc/ssl/certs/146335.pem"
I0919 22:24:58.318945 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/146335.pem
I0919 22:24:58.323696 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/146335.pem
I0919 22:24:58.323742 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/146335.pem
I0919 22:24:58.333578 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/146335.pem /etc/ssl/certs/51391683.0"
I0919 22:24:58.346835 203160 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:24:58.351013 203160 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:24:58.351074 203160 kubeadm.go:926] updating node {m02 192.168.49.3 8443 v1.34.0 docker true true} ...
I0919 22:24:58.351194 203160 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-434755-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.3
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:24:58.351227 203160 kube-vip.go:115] generating kube-vip config ...
I0919 22:24:58.351267 203160 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:24:58.367957 203160 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:24:58.368034 203160 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
I0919 22:24:58.368096 203160 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:24:58.379862 203160 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:24:58.379941 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0919 22:24:58.392276 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0919 22:24:58.417444 203160 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:24:58.442669 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
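kube-vip is installed as a static pod: the manifest generated above is copied into /etc/kubernetes/manifests, where the kubelet picks it up, and the pod then takes part in leader election for the control-plane VIP 192.168.49.254 on eth0. Once the node has joined, it could be checked with (sketch; the kubeconfig context name is assumed to match this profile):
  kubectl --context ha-434755 -n kube-system get pods -o wide | grep kube-vip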
I0919 22:24:58.468697 203160 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:24:58.473305 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:58.487646 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:58.578606 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:58.608451 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:58.608749 203160 start.go:317] joinCluster: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0
MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:58.608859 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0919 22:24:58.608912 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:58.632792 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:58.802805 203160 start.go:343] trying to join control-plane node "m02" to cluster: &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:58.802874 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token b4953v.b0t4y42p8a3t0277 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443"
I0919 22:25:17.080561 203160 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token b4953v.b0t4y42p8a3t0277 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443": (18.277615829s)
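
The eighteen-second step above is the usual two-part HA join: mint a long-lived token on an existing control plane, then run kubeadm join on the new node with the control-plane flags. A sketch of that sequence, with the token and CA hash reduced to placeholders:

    # On an existing control-plane node: emit a ready-made join command (token never expires).
    kubeadm token create --print-join-command --ttl=0

    # On the joining node: become an additional control plane behind the shared VIP endpoint.
    sudo kubeadm join control-plane.minikube.internal:8443 \
      --token <token> \
      --discovery-token-ca-cert-hash sha256:<hash> \
      --control-plane \
      --apiserver-advertise-address=192.168.49.3 \
      --apiserver-bind-port=8443
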
I0919 22:25:17.080625 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0919 22:25:17.341701 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-434755-m02 minikube.k8s.io/updated_at=2025_09_19T22_25_17_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-434755 minikube.k8s.io/primary=false
I0919 22:25:17.424260 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-434755-m02 node-role.kubernetes.io/control-plane:NoSchedule-
I0919 22:25:17.499697 203160 start.go:319] duration metric: took 18.890943143s to joinCluster
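
The two kubectl invocations just before the joinCluster summary amount to the following, shortened to the flags that matter (the label values are minikube bookkeeping):

    # Tag the node as a non-primary member of the ha-434755 profile.
    kubectl label --overwrite nodes ha-434755-m02 \
      minikube.k8s.io/name=ha-434755 \
      minikube.k8s.io/primary=false

    # Remove the control-plane NoSchedule taint so ordinary pods can land on it too.
    kubectl taint nodes ha-434755-m02 node-role.kubernetes.io/control-plane:NoSchedule-
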
I0919 22:25:17.499790 203160 start.go:235] Will wait 6m0s for node &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:17.500059 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:17.501017 203160 out.go:179] * Verifying Kubernetes components...
I0919 22:25:17.502040 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:17.615768 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:25:17.630185 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0919 22:25:17.630259 203160 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0919 22:25:17.630522 203160 node_ready.go:35] waiting up to 6m0s for node "ha-434755-m02" to be "Ready" ...
I0919 22:25:17.639687 203160 node_ready.go:49] node "ha-434755-m02" is "Ready"
I0919 22:25:17.639715 203160 node_ready.go:38] duration metric: took 9.169272ms for node "ha-434755-m02" to be "Ready" ...
I0919 22:25:17.639733 203160 api_server.go:52] waiting for apiserver process to appear ...
I0919 22:25:17.639783 203160 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0919 22:25:17.654193 203160 api_server.go:72] duration metric: took 154.362028ms to wait for apiserver process to appear ...
I0919 22:25:17.654221 203160 api_server.go:88] waiting for apiserver healthz status ...
I0919 22:25:17.654246 203160 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0919 22:25:17.658704 203160 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0919 22:25:17.659870 203160 api_server.go:141] control plane version: v1.34.0
I0919 22:25:17.659894 203160 api_server.go:131] duration metric: took 5.665643ms to wait for apiserver health ...
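
The health wait above boils down to polling the apiserver's /healthz endpoint; something equivalent by hand, assuming the client credentials reported in the client config earlier in the log are used:

    # Expect the literal response "ok" once the apiserver on the primary node is healthy.
    curl --cacert /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt \
         --cert   /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt \
         --key    /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key \
         https://192.168.49.2:8443/healthz
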
I0919 22:25:17.659902 203160 system_pods.go:43] waiting for kube-system pods to appear ...
I0919 22:25:17.664793 203160 system_pods.go:59] 18 kube-system pods found
I0919 22:25:17.664839 203160 system_pods.go:61] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:17.664851 203160 system_pods.go:61] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:17.664856 203160 system_pods.go:61] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:17.664862 203160 system_pods.go:61] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:17.664875 203160 system_pods.go:61] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:17.664883 203160 system_pods.go:61] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:17.664891 203160 system_pods.go:61] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:17.664903 203160 system_pods.go:61] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:17.664909 203160 system_pods.go:61] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:17.664921 203160 system_pods.go:61] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:17.664931 203160 system_pods.go:61] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.664938 203160 system_pods.go:61] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:17.664946 203160 system_pods.go:61] "kube-proxy-tzxjp" [68f449c9-12dc-40e2-9d22-a0c067962cb9] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.664954 203160 system_pods.go:61] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:17.664962 203160 system_pods.go:61] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:17.664969 203160 system_pods.go:61] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:17.664975 203160 system_pods.go:61] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:17.664981 203160 system_pods.go:61] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:17.664991 203160 system_pods.go:74] duration metric: took 5.081378ms to wait for pod list to return data ...
I0919 22:25:17.665004 203160 default_sa.go:34] waiting for default service account to be created ...
I0919 22:25:17.668317 203160 default_sa.go:45] found service account: "default"
I0919 22:25:17.668340 203160 default_sa.go:55] duration metric: took 3.328321ms for default service account to be created ...
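
The default-service-account wait is a simple existence check; with kubectl it is just:

    # Workloads in the default namespace need this ServiceAccount before they can start.
    kubectl -n default get serviceaccount default
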
I0919 22:25:17.668351 203160 system_pods.go:116] waiting for k8s-apps to be running ...
I0919 22:25:17.673137 203160 system_pods.go:86] 18 kube-system pods found
I0919 22:25:17.673173 203160 system_pods.go:89] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:17.673190 203160 system_pods.go:89] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:17.673196 203160 system_pods.go:89] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:17.673202 203160 system_pods.go:89] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:17.673216 203160 system_pods.go:89] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:17.673225 203160 system_pods.go:89] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:17.673232 203160 system_pods.go:89] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:17.673239 203160 system_pods.go:89] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:17.673245 203160 system_pods.go:89] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:17.673253 203160 system_pods.go:89] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:17.673261 203160 system_pods.go:89] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.673269 203160 system_pods.go:89] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:17.673277 203160 system_pods.go:89] "kube-proxy-tzxjp" [68f449c9-12dc-40e2-9d22-a0c067962cb9] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.673285 203160 system_pods.go:89] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:17.673306 203160 system_pods.go:89] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:17.673316 203160 system_pods.go:89] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:17.673321 203160 system_pods.go:89] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:17.673325 203160 system_pods.go:89] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:17.673334 203160 system_pods.go:126] duration metric: took 4.976103ms to wait for k8s-apps to be running ...
I0919 22:25:17.673343 203160 system_svc.go:44] waiting for kubelet service to be running ....
I0919 22:25:17.673397 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0919 22:25:17.689275 203160 system_svc.go:56] duration metric: took 15.922768ms WaitForService to wait for kubelet
I0919 22:25:17.689301 203160 kubeadm.go:578] duration metric: took 189.477657ms to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:25:17.689322 203160 node_conditions.go:102] verifying NodePressure condition ...
I0919 22:25:17.693097 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:17.693135 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:17.693151 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:17.693156 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:17.693162 203160 node_conditions.go:105] duration metric: took 3.833677ms to run NodePressure ...
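
The NodePressure verification reads each node's reported capacity; the same figures seen above (8 CPUs and 304681132Ki of ephemeral storage per node) can be queried directly with a jsonpath expression such as:

    # Print name, CPU capacity and ephemeral-storage capacity for every node.
    kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.capacity.cpu}{"\t"}{.status.capacity.ephemeral-storage}{"\n"}{end}'
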
I0919 22:25:17.693179 203160 start.go:241] waiting for startup goroutines ...
I0919 22:25:17.693211 203160 start.go:255] writing updated cluster config ...
I0919 22:25:17.695103 203160 out.go:203]
I0919 22:25:17.698818 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:17.698972 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:25:17.700470 203160 out.go:179] * Starting "ha-434755-m03" control-plane node in "ha-434755" cluster
I0919 22:25:17.701508 203160 cache.go:123] Beginning downloading kic base image for docker with docker
I0919 22:25:17.702525 203160 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:25:17.703600 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:25:17.703627 203160 cache.go:58] Caching tarball of preloaded images
I0919 22:25:17.703660 203160 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:25:17.703750 203160 preload.go:172] Found /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:25:17.703762 203160 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0919 22:25:17.703897 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:25:17.728614 203160 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:25:17.728640 203160 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:25:17.728661 203160 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:25:17.728696 203160 start.go:360] acquireMachinesLock for ha-434755-m03: {Name:mk4499ef8414fba131017fb3f66e00435d0a646b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:25:17.728819 203160 start.go:364] duration metric: took 98.455µs to acquireMachinesLock for "ha-434755-m03"
I0919 22:25:17.728853 203160 start.go:93] Provisioning new machine with config: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:fals
e kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetP
ath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:17.728991 203160 start.go:125] createHost starting for "m03" (driver="docker")
I0919 22:25:17.732545 203160 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:25:17.732672 203160 start.go:159] libmachine.API.Create for "ha-434755" (driver="docker")
I0919 22:25:17.732707 203160 client.go:168] LocalClient.Create starting
I0919 22:25:17.732782 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem
I0919 22:25:17.732823 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:25:17.732845 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:25:17.732912 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem
I0919 22:25:17.732939 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:25:17.732958 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:25:17.733232 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:25:17.751632 203160 network_create.go:77] Found existing network {name:ha-434755 subnet:0xc00219e2a0 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0919 22:25:17.751674 203160 kic.go:121] calculated static IP "192.168.49.4" for the "ha-434755-m03" container
I0919 22:25:17.751747 203160 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:25:17.770069 203160 cli_runner.go:164] Run: docker volume create ha-434755-m03 --label name.minikube.sigs.k8s.io=ha-434755-m03 --label created_by.minikube.sigs.k8s.io=true
I0919 22:25:17.789823 203160 oci.go:103] Successfully created a docker volume ha-434755-m03
I0919 22:25:17.789902 203160 cli_runner.go:164] Run: docker run --rm --name ha-434755-m03-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m03 --entrypoint /usr/bin/test -v ha-434755-m03:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:25:18.164388 203160 oci.go:107] Successfully prepared a docker volume ha-434755-m03
I0919 22:25:18.164435 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:25:18.164462 203160 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:25:18.164543 203160 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:25:21.103950 203160 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (2.939357533s)
I0919 22:25:21.103986 203160 kic.go:203] duration metric: took 2.939518923s to extract preloaded images to volume ...
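
The step above is how the new node gets its image cache without pulling anything: the preload tarball is bind-mounted read-only into a throwaway kicbase container and unpacked straight into the node's named volume. Reduced to its essentials (image digest elided, tarball path as in the log):

    # Create the node's data volume, then extract the lz4 preload into it via a disposable container.
    docker volume create ha-434755-m03
    docker run --rm --entrypoint /usr/bin/tar \
      -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro \
      -v ha-434755-m03:/extractDir \
      gcr.io/k8s-minikube/kicbase:v0.0.48 \
      -I lz4 -xf /preloaded.tar -C /extractDir
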
W0919 22:25:21.104096 203160 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:25:21.104151 203160 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:25:21.104202 203160 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:25:21.177154 203160 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-434755-m03 --name ha-434755-m03 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m03 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-434755-m03 --network ha-434755 --ip 192.168.49.4 --volume ha-434755-m03:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:25:21.498634 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Running}}
I0919 22:25:21.522257 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Status}}
I0919 22:25:21.545087 203160 cli_runner.go:164] Run: docker exec ha-434755-m03 stat /var/lib/dpkg/alternatives/iptables
I0919 22:25:21.601217 203160 oci.go:144] the created container "ha-434755-m03" has a running status.
I0919 22:25:21.601289 203160 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa...
I0919 22:25:21.834101 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:25:21.834162 203160 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:25:21.931924 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Status}}
I0919 22:25:21.958463 203160 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:25:21.958488 203160 kic_runner.go:114] Args: [docker exec --privileged ha-434755-m03 chown docker:docker /home/docker/.ssh/authorized_keys]
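
Key installation for the node's unprivileged docker user goes through the container runtime rather than SSH, since SSH is not usable yet. A rough by-hand equivalent, where the mkdir and cp steps are one plausible way to stage the file and only the chown exec is taken verbatim from the log:

    # Stage the freshly generated public key inside the node container, then fix ownership.
    docker exec --privileged ha-434755-m03 mkdir -p /home/docker/.ssh
    docker cp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa.pub \
      ha-434755-m03:/home/docker/.ssh/authorized_keys
    docker exec --privileged ha-434755-m03 chown docker:docker /home/docker/.ssh/authorized_keys
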
I0919 22:25:22.013210 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Status}}
I0919 22:25:22.034113 203160 machine.go:93] provisionDockerMachine start ...
I0919 22:25:22.034216 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.055636 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:22.055967 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:22.055993 203160 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:25:22.197369 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m03
I0919 22:25:22.197398 203160 ubuntu.go:182] provisioning hostname "ha-434755-m03"
I0919 22:25:22.197459 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.216027 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:22.216285 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:22.216301 203160 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-434755-m03 && echo "ha-434755-m03" | sudo tee /etc/hostname
I0919 22:25:22.368448 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m03
I0919 22:25:22.368549 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.386972 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:22.387278 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:22.387304 203160 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-434755-m03' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-434755-m03/g' /etc/hosts;
else
echo '127.0.1.1 ha-434755-m03' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:25:22.524292 203160 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:25:22.524331 203160 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-142711/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-142711/.minikube}
I0919 22:25:22.524354 203160 ubuntu.go:190] setting up certificates
I0919 22:25:22.524368 203160 provision.go:84] configureAuth start
I0919 22:25:22.524434 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m03
I0919 22:25:22.541928 203160 provision.go:143] copyHostCerts
I0919 22:25:22.541971 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:25:22.542000 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem, removing ...
I0919 22:25:22.542009 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:25:22.542076 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem (1123 bytes)
I0919 22:25:22.542159 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:25:22.542180 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem, removing ...
I0919 22:25:22.542186 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:25:22.542213 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem (1675 bytes)
I0919 22:25:22.542310 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:25:22.542334 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem, removing ...
I0919 22:25:22.542337 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:25:22.542362 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem (1078 bytes)
I0919 22:25:22.542414 203160 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem org=jenkins.ha-434755-m03 san=[127.0.0.1 192.168.49.4 ha-434755-m03 localhost minikube]
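
minikube issues this server certificate in-process, but its content can be read off the log line: a cert signed by the shared CA whose SANs cover the loopback address, the node IP 192.168.49.4, the hostname, and the generic localhost/minikube names. Purely as an illustration (subject fields approximate), an openssl equivalent would be:

    # Illustrative only: issue a server cert with the SANs listed above, signed by the minikube CA.
    openssl req -new -newkey rsa:2048 -nodes -keyout server-key.pem -out server.csr \
      -subj "/O=jenkins.ha-434755-m03/CN=ha-434755-m03"
    openssl x509 -req -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial \
      -out server.pem -days 365 \
      -extfile <(printf 'subjectAltName=IP:127.0.0.1,IP:192.168.49.4,DNS:ha-434755-m03,DNS:localhost,DNS:minikube')
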
I0919 22:25:22.877628 203160 provision.go:177] copyRemoteCerts
I0919 22:25:22.877694 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:25:22.877741 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.896937 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:22.995146 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:25:22.995210 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0919 22:25:23.022236 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:25:23.022316 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0919 22:25:23.047563 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:25:23.047631 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0919 22:25:23.072319 203160 provision.go:87] duration metric: took 547.932448ms to configureAuth
I0919 22:25:23.072353 203160 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:25:23.072625 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:23.072688 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:23.090959 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:23.091171 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:23.091183 203160 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0919 22:25:23.228223 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0919 22:25:23.228253 203160 ubuntu.go:71] root file system type: overlay
I0919 22:25:23.228422 203160 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0919 22:25:23.228509 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:23.246883 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:23.247100 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:23.247170 203160 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment="NO_PROXY=192.168.49.2"
Environment="NO_PROXY=192.168.49.2,192.168.49.3"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0919 22:25:23.398060 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment=NO_PROXY=192.168.49.2
Environment=NO_PROXY=192.168.49.2,192.168.49.3
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0919 22:25:23.398137 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:23.415663 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:23.415892 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:23.415918 203160 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0919 22:25:24.567023 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-19 22:25:23.396311399 +0000
@@ -9,23 +9,36 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+Environment=NO_PROXY=192.168.49.2
+Environment=NO_PROXY=192.168.49.2,192.168.49.3
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0919 22:25:24.567060 203160 machine.go:96] duration metric: took 2.53292644s to provisionDockerMachine
I0919 22:25:24.567072 203160 client.go:171] duration metric: took 6.83435882s to LocalClient.Create
I0919 22:25:24.567092 203160 start.go:167] duration metric: took 6.834424553s to libmachine.API.Create "ha-434755"
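
The diff above replaced the stock docker.service with minikube's rendered unit on the node's first boot; afterwards the effective unit can be spot-checked with:

    # Show the ExecStart and Environment lines systemd actually loaded for docker.
    systemctl cat docker.service | grep -E '^(ExecStart|Environment)='
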
I0919 22:25:24.567099 203160 start.go:293] postStartSetup for "ha-434755-m03" (driver="docker")
I0919 22:25:24.567108 203160 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:25:24.567161 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:25:24.567201 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.584782 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:24.683573 203160 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:25:24.686859 203160 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:25:24.686883 203160 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:25:24.686890 203160 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:25:24.686896 203160 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:25:24.686906 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/addons for local assets ...
I0919 22:25:24.686958 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/files for local assets ...
I0919 22:25:24.687030 203160 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> 1463352.pem in /etc/ssl/certs
I0919 22:25:24.687040 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /etc/ssl/certs/1463352.pem
I0919 22:25:24.687116 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:25:24.695639 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:25:24.721360 203160 start.go:296] duration metric: took 154.24817ms for postStartSetup
I0919 22:25:24.721702 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m03
I0919 22:25:24.739596 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:25:24.739824 203160 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:25:24.739863 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.756921 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:24.848110 203160 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:25:24.852461 203160 start.go:128] duration metric: took 7.123445347s to createHost
I0919 22:25:24.852485 203160 start.go:83] releasing machines lock for "ha-434755-m03", held for 7.123651539s
I0919 22:25:24.852564 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m03
I0919 22:25:24.871364 203160 out.go:179] * Found network options:
I0919 22:25:24.872460 203160 out.go:179] - NO_PROXY=192.168.49.2,192.168.49.3
W0919 22:25:24.873469 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:25:24.873491 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:25:24.873531 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:25:24.873550 203160 proxy.go:120] fail to check proxy env: Error ip not in block
I0919 22:25:24.873614 203160 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:25:24.873651 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.873674 203160 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:25:24.873726 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.891768 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:24.892067 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:25.055623 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:25:25.084377 203160 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:25:25.084463 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:25:25.110916 203160 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:25:25.110954 203160 start.go:495] detecting cgroup driver to use...
I0919 22:25:25.110987 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:25:25.111095 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:25:25.128062 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:25:25.138541 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:25:25.147920 203160 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:25:25.147980 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:25:25.158084 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:25:25.167726 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:25:25.177468 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:25:25.187066 203160 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:25:25.196074 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:25:25.205874 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:25:25.215655 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:25:25.225542 203160 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:25:25.233921 203160 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:25:25.241915 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:25.307691 203160 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0919 22:25:25.379485 203160 start.go:495] detecting cgroup driver to use...
I0919 22:25:25.379559 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:25:25.379617 203160 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0919 22:25:25.392037 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:25:25.402672 203160 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0919 22:25:25.417255 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:25:25.428199 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:25:25.438890 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:25:25.454554 203160 ssh_runner.go:195] Run: which cri-dockerd
I0919 22:25:25.457748 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0919 22:25:25.467191 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0919 22:25:25.484961 203160 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0919 22:25:25.554190 203160 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0919 22:25:25.619726 203160 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0919 22:25:25.619771 203160 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0919 22:25:25.638490 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0919 22:25:25.649394 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:25.718759 203160 ssh_runner.go:195] Run: sudo systemctl restart docker
I0919 22:25:26.508414 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
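
With /etc/docker/daemon.json rewritten for the systemd cgroup driver and the daemon restarted, the setting can be verified directly:

    # The kubelet and docker must agree on the cgroup driver; expect "systemd" here.
    docker info --format '{{.CgroupDriver}}'
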
I0919 22:25:26.521162 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0919 22:25:26.532748 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:25:26.543940 203160 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0919 22:25:26.612578 203160 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0919 22:25:26.675793 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:26.742908 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0919 22:25:26.767410 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0919 22:25:26.778129 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:26.843785 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0919 22:25:26.914025 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:25:26.926481 203160 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0919 22:25:26.926561 203160 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0919 22:25:26.930135 203160 start.go:563] Will wait 60s for crictl version
I0919 22:25:26.930190 203160 ssh_runner.go:195] Run: which crictl
I0919 22:25:26.933448 203160 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:25:26.970116 203160 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
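
Because /etc/crictl.yaml was pointed at cri-dockerd a few lines earlier, the version probe above is equivalent to:

    # crictl reads the runtime endpoint from /etc/crictl.yaml (unix:///var/run/cri-dockerd.sock).
    sudo crictl version
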
I0919 22:25:26.970186 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:25:26.995443 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:25:27.022587 203160 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0919 22:25:27.023535 203160 out.go:179] - env NO_PROXY=192.168.49.2
I0919 22:25:27.024458 203160 out.go:179] - env NO_PROXY=192.168.49.2,192.168.49.3
I0919 22:25:27.025398 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:25:27.041313 203160 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:25:27.045217 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:25:27.056734 203160 mustload.go:65] Loading cluster: ha-434755
I0919 22:25:27.056929 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:27.057119 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:25:27.073694 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:25:27.073923 203160 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755 for IP: 192.168.49.4
I0919 22:25:27.073935 203160 certs.go:194] generating shared ca certs ...
I0919 22:25:27.073947 203160 certs.go:226] acquiring lock for ca certs: {Name:mkc5df652d6204fd8687dfaaf83b02c6e10b58b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:25:27.074070 203160 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key
I0919 22:25:27.074110 203160 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key
I0919 22:25:27.074119 203160 certs.go:256] generating profile certs ...
I0919 22:25:27.074189 203160 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key
I0919 22:25:27.074218 203160 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6
I0919 22:25:27.074232 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.4 192.168.49.254]
I0919 22:25:27.130384 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6 ...
I0919 22:25:27.130417 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6: {Name:mke05473b288d96ff0a35c82b85fde4c8e83b40c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:25:27.130606 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6 ...
I0919 22:25:27.130621 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6: {Name:mk192f98c5799773d19e5939501046d3123dfe7a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:25:27.130715 203160 certs.go:381] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt
I0919 22:25:27.130866 203160 certs.go:385] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key
I0919 22:25:27.131029 203160 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key
I0919 22:25:27.131044 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:25:27.131061 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:25:27.131075 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:25:27.131089 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:25:27.131102 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:25:27.131115 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:25:27.131128 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:25:27.131141 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:25:27.131198 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem (1338 bytes)
W0919 22:25:27.131239 203160 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335_empty.pem, impossibly tiny 0 bytes
I0919 22:25:27.131248 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem (1675 bytes)
I0919 22:25:27.131275 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem (1078 bytes)
I0919 22:25:27.131303 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem (1123 bytes)
I0919 22:25:27.131331 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem (1675 bytes)
I0919 22:25:27.131380 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:25:27.131411 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem -> /usr/share/ca-certificates/146335.pem
I0919 22:25:27.131428 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.131442 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.131523 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:25:27.159068 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:25:27.248746 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0919 22:25:27.252715 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0919 22:25:27.267211 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0919 22:25:27.270851 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0919 22:25:27.283028 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0919 22:25:27.286477 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0919 22:25:27.298415 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0919 22:25:27.301783 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0919 22:25:27.314834 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0919 22:25:27.318008 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0919 22:25:27.330473 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0919 22:25:27.333984 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0919 22:25:27.345794 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:25:27.369657 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:25:27.393116 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:25:27.416244 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:25:27.439315 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1444 bytes)
I0919 22:25:27.463476 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0919 22:25:27.486915 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:25:27.510165 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:25:27.534471 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem --> /usr/share/ca-certificates/146335.pem (1338 bytes)
I0919 22:25:27.560237 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /usr/share/ca-certificates/1463352.pem (1708 bytes)
I0919 22:25:27.583106 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:25:27.606007 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0919 22:25:27.623725 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0919 22:25:27.641200 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0919 22:25:27.658321 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0919 22:25:27.675317 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0919 22:25:27.692422 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0919 22:25:27.709455 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0919 22:25:27.727392 203160 ssh_runner.go:195] Run: openssl version
I0919 22:25:27.732862 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:25:27.742299 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.745678 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.745728 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.752398 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:25:27.761605 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/146335.pem && ln -fs /usr/share/ca-certificates/146335.pem /etc/ssl/certs/146335.pem"
I0919 22:25:27.771021 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/146335.pem
I0919 22:25:27.774382 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/146335.pem
I0919 22:25:27.774418 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/146335.pem
I0919 22:25:27.781109 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/146335.pem /etc/ssl/certs/51391683.0"
I0919 22:25:27.790814 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1463352.pem && ln -fs /usr/share/ca-certificates/1463352.pem /etc/ssl/certs/1463352.pem"
I0919 22:25:27.799904 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.803130 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.803179 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.809808 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/1463352.pem /etc/ssl/certs/3ec20f2e.0"
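Note: the three openssl/ln sequences above follow the standard OpenSSL hash-link convention: the certificate is linked into /usr/share/ca-certificates, its subject-name hash is computed, and /etc/ssl/certs/<hash>.0 is pointed at it. A minimal sketch of the same pattern for an arbitrary certificate (the example.pem file name is illustrative, not from this run):

    HASH=$(openssl x509 -hash -noout -in /usr/share/ca-certificates/example.pem)
    sudo ln -fs /usr/share/ca-certificates/example.pem /etc/ssl/certs/example.pem
    sudo ln -fs /etc/ssl/certs/example.pem "/etc/ssl/certs/${HASH}.0"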
I0919 22:25:27.819246 203160 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:25:27.822627 203160 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:25:27.822680 203160 kubeadm.go:926] updating node {m03 192.168.49.4 8443 v1.34.0 docker true true} ...
I0919 22:25:27.822775 203160 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-434755-m03 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.4
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:25:27.822800 203160 kube-vip.go:115] generating kube-vip config ...
I0919 22:25:27.822828 203160 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:25:27.834857 203160 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
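Note: the lsmod check above came back empty, so kube-vip falls back from IPVS-based control-plane load-balancing. On hosts where the modules are available they could be loaded and re-checked roughly like this (illustrative; whether the modules exist depends on the host kernel build):

    sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh
    lsmod | grep ip_vs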
I0919 22:25:27.834926 203160 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
  - args:
    - manager
    env:
    - name: vip_arp
      value: "true"
    - name: port
      value: "8443"
    - name: vip_nodename
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    - name: vip_interface
      value: eth0
    - name: vip_cidr
      value: "32"
    - name: dns_mode
      value: first
    - name: cp_enable
      value: "true"
    - name: cp_namespace
      value: kube-system
    - name: vip_leaderelection
      value: "true"
    - name: vip_leasename
      value: plndr-cp-lock
    - name: vip_leaseduration
      value: "5"
    - name: vip_renewdeadline
      value: "3"
    - name: vip_retryperiod
      value: "1"
    - name: address
      value: 192.168.49.254
    - name: prometheus_server
      value: :2112
    image: ghcr.io/kube-vip/kube-vip:v1.0.0
    imagePullPolicy: IfNotPresent
    name: kube-vip
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
    volumeMounts:
    - mountPath: /etc/kubernetes/admin.conf
      name: kubeconfig
  hostAliases:
  - hostnames:
    - kubernetes
    ip: 127.0.0.1
  hostNetwork: true
  volumes:
  - hostPath:
      path: "/etc/kubernetes/admin.conf"
    name: kubeconfig
status: {}
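Note: this manifest is written as a static pod (see the scp to /etc/kubernetes/manifests below), so each control-plane node runs its own kube-vip instance advertising the VIP 192.168.49.254. Once the cluster is up it can be inspected like any other kube-system pod (command illustrative):

    kubectl -n kube-system get pod kube-vip-ha-434755-m03 -o wide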
I0919 22:25:27.834980 203160 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:25:27.843463 203160 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:25:27.843532 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0919 22:25:27.852030 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0919 22:25:27.869894 203160 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:25:27.888537 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0919 22:25:27.908135 203160 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:25:27.911776 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:25:27.923898 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:27.989986 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:25:28.015049 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:25:28.015341 203160 start.go:317] joinCluster: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:f
alse logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticI
P: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:25:28.015488 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0919 22:25:28.015561 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:25:28.036185 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:25:28.179815 203160 start.go:343] trying to join control-plane node "m03" to cluster: &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:28.179865 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token ktda9v.620xzponyzx4q4u3 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443"
I0919 22:25:39.101433 203160 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token ktda9v.620xzponyzx4q4u3 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443": (10.921540133s)
I0919 22:25:39.101473 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0919 22:25:39.324555 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-434755-m03 minikube.k8s.io/updated_at=2025_09_19T22_25_39_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-434755 minikube.k8s.io/primary=false
I0919 22:25:39.399339 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-434755-m03 node-role.kubernetes.io/control-plane:NoSchedule-
I0919 22:25:39.475025 203160 start.go:319] duration metric: took 11.459681606s to joinCluster
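Note: at this point the third control-plane node has been joined via kubeadm, labeled, and had its control-plane taint removed. A quick manual check of the resulting membership, using the same in-VM kubectl and kubeconfig the test uses (illustrative):

    sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig get nodes -o wide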
I0919 22:25:39.475121 203160 start.go:235] Will wait 6m0s for node &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:39.475445 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:39.476384 203160 out.go:179] * Verifying Kubernetes components...
I0919 22:25:39.477465 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:39.581053 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:25:39.594584 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0919 22:25:39.594654 203160 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0919 22:25:39.594885 203160 node_ready.go:35] waiting up to 6m0s for node "ha-434755-m03" to be "Ready" ...
W0919 22:25:41.598871 203160 node_ready.go:57] node "ha-434755-m03" has "Ready":"False" status (will retry)
I0919 22:25:43.601543 203160 node_ready.go:49] node "ha-434755-m03" is "Ready"
I0919 22:25:43.601575 203160 node_ready.go:38] duration metric: took 4.006671921s for node "ha-434755-m03" to be "Ready" ...
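Note: the readiness poll above is equivalent to waiting on the node's Ready condition; the same wait can be expressed directly with kubectl (illustrative, matching the 6m budget used in this run):

    kubectl wait --for=condition=Ready node/ha-434755-m03 --timeout=6m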
I0919 22:25:43.601598 203160 api_server.go:52] waiting for apiserver process to appear ...
I0919 22:25:43.601660 203160 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0919 22:25:43.617376 203160 api_server.go:72] duration metric: took 4.142210029s to wait for apiserver process to appear ...
I0919 22:25:43.617405 203160 api_server.go:88] waiting for apiserver healthz status ...
I0919 22:25:43.617428 203160 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0919 22:25:43.622827 203160 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
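Note: the healthz probe above hits the first control-plane endpoint directly rather than the VIP. The same check from outside the cluster looks roughly like this (-k because the apiserver serving certificate is not in the host trust store; illustrative):

    curl -k https://192.168.49.2:8443/healthz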
I0919 22:25:43.624139 203160 api_server.go:141] control plane version: v1.34.0
I0919 22:25:43.624164 203160 api_server.go:131] duration metric: took 6.751487ms to wait for apiserver health ...
I0919 22:25:43.624175 203160 system_pods.go:43] waiting for kube-system pods to appear ...
I0919 22:25:43.631480 203160 system_pods.go:59] 25 kube-system pods found
I0919 22:25:43.631526 203160 system_pods.go:61] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:43.631534 203160 system_pods.go:61] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:43.631540 203160 system_pods.go:61] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:43.631545 203160 system_pods.go:61] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:43.631555 203160 system_pods.go:61] "etcd-ha-434755-m03" [6e3492c7-5026-460d-87b4-e3e52a2a36ab] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0919 22:25:43.631565 203160 system_pods.go:61] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Running
I0919 22:25:43.631584 203160 system_pods.go:61] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:43.631592 203160 system_pods.go:61] "kindnet-jrkrv" [61220abf-7b4e-440a-a5aa-788c5991cacc] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:43.631602 203160 system_pods.go:61] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:43.631607 203160 system_pods.go:61] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:43.631624 203160 system_pods.go:61] "kube-apiserver-ha-434755-m03" [acbc85b2-3446-4129-99c3-618e857912fb] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0919 22:25:43.631633 203160 system_pods.go:61] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:43.631639 203160 system_pods.go:61] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:43.631652 203160 system_pods.go:61] "kube-controller-manager-ha-434755-m03" [3eb7c63e-1489-403e-9409-e9c347fff4c0] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0919 22:25:43.631660 203160 system_pods.go:61] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Running
I0919 22:25:43.631668 203160 system_pods.go:61] "kube-proxy-dzrbh" [6a5d3a9f-e63f-43df-bd58-596dc274f097] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:43.631675 203160 system_pods.go:61] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:43.631683 203160 system_pods.go:61] "kube-proxy-vwrdt" [e3337cd7-84eb-4ddd-921f-1ef42899cc96] Failed / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:43.631692 203160 system_pods.go:61] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:43.631698 203160 system_pods.go:61] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running
I0919 22:25:43.631709 203160 system_pods.go:61] "kube-scheduler-ha-434755-m03" [65aaaab6-6371-4454-b404-7fe2f6c4e41a] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:43.631718 203160 system_pods.go:61] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:43.631724 203160 system_pods.go:61] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:43.631732 203160 system_pods.go:61] "kube-vip-ha-434755-m03" [58560a63-dc5d-41bc-9805-e904f49b2cad] Running
I0919 22:25:43.631737 203160 system_pods.go:61] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:43.631747 203160 system_pods.go:74] duration metric: took 7.564894ms to wait for pod list to return data ...
I0919 22:25:43.631760 203160 default_sa.go:34] waiting for default service account to be created ...
I0919 22:25:43.635188 203160 default_sa.go:45] found service account: "default"
I0919 22:25:43.635210 203160 default_sa.go:55] duration metric: took 3.443504ms for default service account to be created ...
I0919 22:25:43.635221 203160 system_pods.go:116] waiting for k8s-apps to be running ...
I0919 22:25:43.640825 203160 system_pods.go:86] 24 kube-system pods found
I0919 22:25:43.640849 203160 system_pods.go:89] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:43.640854 203160 system_pods.go:89] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:43.640858 203160 system_pods.go:89] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:43.640861 203160 system_pods.go:89] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:43.640867 203160 system_pods.go:89] "etcd-ha-434755-m03" [6e3492c7-5026-460d-87b4-e3e52a2a36ab] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0919 22:25:43.640872 203160 system_pods.go:89] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Running
I0919 22:25:43.640877 203160 system_pods.go:89] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:43.640883 203160 system_pods.go:89] "kindnet-jrkrv" [61220abf-7b4e-440a-a5aa-788c5991cacc] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:43.640889 203160 system_pods.go:89] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:43.640893 203160 system_pods.go:89] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:43.640901 203160 system_pods.go:89] "kube-apiserver-ha-434755-m03" [acbc85b2-3446-4129-99c3-618e857912fb] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0919 22:25:43.640907 203160 system_pods.go:89] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:43.640913 203160 system_pods.go:89] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:43.640922 203160 system_pods.go:89] "kube-controller-manager-ha-434755-m03" [3eb7c63e-1489-403e-9409-e9c347fff4c0] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0919 22:25:43.640927 203160 system_pods.go:89] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Running
I0919 22:25:43.640932 203160 system_pods.go:89] "kube-proxy-dzrbh" [6a5d3a9f-e63f-43df-bd58-596dc274f097] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:43.640937 203160 system_pods.go:89] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:43.640941 203160 system_pods.go:89] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:43.640944 203160 system_pods.go:89] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running
I0919 22:25:43.640952 203160 system_pods.go:89] "kube-scheduler-ha-434755-m03" [65aaaab6-6371-4454-b404-7fe2f6c4e41a] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:43.640958 203160 system_pods.go:89] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:43.640966 203160 system_pods.go:89] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:43.640971 203160 system_pods.go:89] "kube-vip-ha-434755-m03" [58560a63-dc5d-41bc-9805-e904f49b2cad] Running
I0919 22:25:43.640974 203160 system_pods.go:89] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:43.640981 203160 system_pods.go:126] duration metric: took 5.753999ms to wait for k8s-apps to be running ...
I0919 22:25:43.640989 203160 system_svc.go:44] waiting for kubelet service to be running ....
I0919 22:25:43.641031 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0919 22:25:43.653532 203160 system_svc.go:56] duration metric: took 12.534189ms WaitForService to wait for kubelet
I0919 22:25:43.653556 203160 kubeadm.go:578] duration metric: took 4.178399256s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:25:43.653573 203160 node_conditions.go:102] verifying NodePressure condition ...
I0919 22:25:43.656435 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:43.656455 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:43.656467 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:43.656470 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:43.656475 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:43.656479 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:43.656484 203160 node_conditions.go:105] duration metric: took 2.906956ms to run NodePressure ...
I0919 22:25:43.656557 203160 start.go:241] waiting for startup goroutines ...
I0919 22:25:43.656587 203160 start.go:255] writing updated cluster config ...
I0919 22:25:43.656893 203160 ssh_runner.go:195] Run: rm -f paused
I0919 22:25:43.660610 203160 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0919 22:25:43.661067 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(n
il)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0919 22:25:43.664242 203160 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-4lmln" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.669047 203160 pod_ready.go:94] pod "coredns-66bc5c9577-4lmln" is "Ready"
I0919 22:25:43.669069 203160 pod_ready.go:86] duration metric: took 4.804098ms for pod "coredns-66bc5c9577-4lmln" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.669076 203160 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-w8trg" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.673294 203160 pod_ready.go:94] pod "coredns-66bc5c9577-w8trg" is "Ready"
I0919 22:25:43.673313 203160 pod_ready.go:86] duration metric: took 4.232517ms for pod "coredns-66bc5c9577-w8trg" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.676291 203160 pod_ready.go:83] waiting for pod "etcd-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.681202 203160 pod_ready.go:94] pod "etcd-ha-434755" is "Ready"
I0919 22:25:43.681224 203160 pod_ready.go:86] duration metric: took 4.891614ms for pod "etcd-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.681231 203160 pod_ready.go:83] waiting for pod "etcd-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.685174 203160 pod_ready.go:94] pod "etcd-ha-434755-m02" is "Ready"
I0919 22:25:43.685197 203160 pod_ready.go:86] duration metric: took 3.961188ms for pod "etcd-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.685203 203160 pod_ready.go:83] waiting for pod "etcd-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.861561 203160 request.go:683] "Waited before sending request" delay="176.248264ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-434755-m03"
I0919 22:25:44.062212 203160 request.go:683] "Waited before sending request" delay="197.34334ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:44.261544 203160 request.go:683] "Waited before sending request" delay="75.158894ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-434755-m03"
I0919 22:25:44.461584 203160 request.go:683] "Waited before sending request" delay="196.309622ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:44.861909 203160 request.go:683] "Waited before sending request" delay="172.267033ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:45.261844 203160 request.go:683] "Waited before sending request" delay="72.222149ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
W0919 22:25:45.690633 203160 pod_ready.go:104] pod "etcd-ha-434755-m03" is not "Ready", error: <nil>
I0919 22:25:46.192067 203160 pod_ready.go:94] pod "etcd-ha-434755-m03" is "Ready"
I0919 22:25:46.192098 203160 pod_ready.go:86] duration metric: took 2.50688828s for pod "etcd-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.262400 203160 request.go:683] "Waited before sending request" delay="70.17118ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-apiserver"
I0919 22:25:46.266643 203160 pod_ready.go:83] waiting for pod "kube-apiserver-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.462133 203160 request.go:683] "Waited before sending request" delay="195.353683ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755"
I0919 22:25:46.661695 203160 request.go:683] "Waited before sending request" delay="196.23519ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755"
I0919 22:25:46.664990 203160 pod_ready.go:94] pod "kube-apiserver-ha-434755" is "Ready"
I0919 22:25:46.665013 203160 pod_ready.go:86] duration metric: took 398.342895ms for pod "kube-apiserver-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.665024 203160 pod_ready.go:83] waiting for pod "kube-apiserver-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.862485 203160 request.go:683] "Waited before sending request" delay="197.349925ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755-m02"
I0919 22:25:47.062458 203160 request.go:683] "Waited before sending request" delay="196.27598ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m02"
I0919 22:25:47.066027 203160 pod_ready.go:94] pod "kube-apiserver-ha-434755-m02" is "Ready"
I0919 22:25:47.066062 203160 pod_ready.go:86] duration metric: took 401.030788ms for pod "kube-apiserver-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:47.066074 203160 pod_ready.go:83] waiting for pod "kube-apiserver-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:47.262536 203160 request.go:683] "Waited before sending request" delay="196.349445ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755-m03"
I0919 22:25:47.461658 203160 request.go:683] "Waited before sending request" delay="196.15827ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:47.662339 203160 request.go:683] "Waited before sending request" delay="95.242557ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755-m03"
I0919 22:25:47.861611 203160 request.go:683] "Waited before sending request" delay="196.286818ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:48.262313 203160 request.go:683] "Waited before sending request" delay="192.342763ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:48.661859 203160 request.go:683] "Waited before sending request" delay="92.219172ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
W0919 22:25:49.071933 203160 pod_ready.go:104] pod "kube-apiserver-ha-434755-m03" is not "Ready", error: <nil>
I0919 22:25:51.071739 203160 pod_ready.go:94] pod "kube-apiserver-ha-434755-m03" is "Ready"
I0919 22:25:51.071767 203160 pod_ready.go:86] duration metric: took 4.005686408s for pod "kube-apiserver-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.074543 203160 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.262152 203160 request.go:683] "Waited before sending request" delay="185.334685ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755"
I0919 22:25:51.265630 203160 pod_ready.go:94] pod "kube-controller-manager-ha-434755" is "Ready"
I0919 22:25:51.265657 203160 pod_ready.go:86] duration metric: took 191.092666ms for pod "kube-controller-manager-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.265666 203160 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.462098 203160 request.go:683] "Waited before sending request" delay="196.345826ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-434755-m02"
I0919 22:25:51.661912 203160 request.go:683] "Waited before sending request" delay="196.187823ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m02"
I0919 22:25:51.665191 203160 pod_ready.go:94] pod "kube-controller-manager-ha-434755-m02" is "Ready"
I0919 22:25:51.665224 203160 pod_ready.go:86] duration metric: took 399.551288ms for pod "kube-controller-manager-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.665233 203160 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.861619 203160 request.go:683] "Waited before sending request" delay="196.276968ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-434755-m03"
I0919 22:25:52.062202 203160 request.go:683] "Waited before sending request" delay="197.351779ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:52.065578 203160 pod_ready.go:94] pod "kube-controller-manager-ha-434755-m03" is "Ready"
I0919 22:25:52.065604 203160 pod_ready.go:86] duration metric: took 400.365679ms for pod "kube-controller-manager-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.262003 203160 request.go:683] "Waited before sending request" delay="196.29708ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=k8s-app%3Dkube-proxy"
I0919 22:25:52.265548 203160 pod_ready.go:83] waiting for pod "kube-proxy-4cnsm" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.462021 203160 request.go:683] "Waited before sending request" delay="196.352536ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-4cnsm"
I0919 22:25:52.662519 203160 request.go:683] "Waited before sending request" delay="196.351016ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m02"
I0919 22:25:52.665831 203160 pod_ready.go:94] pod "kube-proxy-4cnsm" is "Ready"
I0919 22:25:52.665859 203160 pod_ready.go:86] duration metric: took 400.28275ms for pod "kube-proxy-4cnsm" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.665868 203160 pod_ready.go:83] waiting for pod "kube-proxy-dzrbh" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.862291 203160 request.go:683] "Waited before sending request" delay="196.344667ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-dzrbh"
I0919 22:25:53.061976 203160 request.go:683] "Waited before sending request" delay="196.35101ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:53.261911 203160 request.go:683] "Waited before sending request" delay="95.241357ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-dzrbh"
I0919 22:25:53.461590 203160 request.go:683] "Waited before sending request" delay="196.28491ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:53.862244 203160 request.go:683] "Waited before sending request" delay="192.346086ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:54.261842 203160 request.go:683] "Waited before sending request" delay="92.230453ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
W0919 22:25:54.671717 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:25:56.671839 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:25:58.672473 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:01.172572 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:03.672671 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:06.172469 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:08.672353 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:11.172405 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:13.672314 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:16.172799 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:18.672196 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:20.672298 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:23.171528 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:25.172008 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:27.172570 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:29.672449 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:31.672563 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:33.672868 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:36.170989 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:38.171892 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:40.172022 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:42.172174 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:44.671993 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:47.171063 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:49.172486 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:51.672732 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:54.172023 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:56.172144 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:58.671775 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:00.671992 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:03.171993 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:05.671723 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:08.171842 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:10.172121 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:12.672014 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:15.172390 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:17.172822 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:19.672126 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:21.673333 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:24.171769 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:26.672310 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:29.171411 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:31.171872 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:33.172386 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:35.172451 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:37.672546 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:40.172235 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:42.172963 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:44.671777 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:46.671841 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:49.171918 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:51.172295 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:53.671812 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:55.672948 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:58.171734 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:00.172103 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:02.174861 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:04.672033 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:07.171816 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:09.671792 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:11.672609 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:14.171130 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:16.172329 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:18.672102 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:21.172674 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:23.173027 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:25.672026 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:28.171975 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:30.672302 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:32.672601 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:35.171532 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:37.171862 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:39.672084 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:42.172811 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:44.672206 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:46.672508 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:49.171457 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:51.172154 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:53.172276 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:55.672125 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:58.173041 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:00.672216 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:03.172384 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:05.673458 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:08.172666 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:10.672118 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:13.171914 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:15.172099 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:17.671977 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:20.172061 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:22.671971 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:24.672271 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:27.171769 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:29.172036 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:31.172563 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:33.672797 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:36.171859 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:38.671554 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:41.171621 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:43.172570 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
I0919 22:29:43.661688 203160 pod_ready.go:86] duration metric: took 3m50.995803943s for pod "kube-proxy-dzrbh" in "kube-system" namespace to be "Ready" or be gone ...
W0919 22:29:43.661752 203160 pod_ready.go:65] not all pods in "kube-system" namespace with "k8s-app=kube-proxy" label are "Ready", will retry: waitPodCondition: context deadline exceeded
I0919 22:29:43.661771 203160 pod_ready.go:40] duration metric: took 4m0.001130626s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0919 22:29:43.663339 203160 out.go:203]
W0919 22:29:43.664381 203160 out.go:285] X Exiting due to GUEST_START: extra waiting: WaitExtra: context deadline exceeded
X Exiting due to GUEST_START: extra waiting: WaitExtra: context deadline exceeded
I0919 22:29:43.665560 203160 out.go:203]
** /stderr **
ha_test.go:103: failed to fresh-start ha (multi-control plane) cluster. args "out/minikube-linux-amd64 -p ha-434755 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker" : exit status 80
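Note: the long run of pod_ready warnings above is a poll that keeps retrying until the pod reports Ready or the surrounding context deadline (here 4m0s) expires, at which point the start aborts with GUEST_START. A minimal sketch of such a deadline-bounded poll in Go, using a hypothetical isPodReady callback rather than minikube's actual pod_ready.go code:
package main
import (
	"context"
	"errors"
	"fmt"
	"time"
)
// waitForReady polls check every interval until it returns true,
// check returns an error, or the context deadline expires.
func waitForReady(ctx context.Context, interval time.Duration, check func() (bool, error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		ready, err := check()
		if err != nil {
			return err
		}
		if ready {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("waitPodCondition: %w", ctx.Err()) // surfaces "context deadline exceeded"
		case <-ticker.C:
		}
	}
}
func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
	defer cancel()
	// isPodReady is a hypothetical stand-in for a kube-apiserver query.
	isPodReady := func() (bool, error) { return false, nil }
	if err := waitForReady(ctx, 2*time.Second, isPodReady); err != nil && errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("pod never became Ready before the deadline:", err)
	}
}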
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:223: ======> post-mortem[TestMultiControlPlane/serial/StartCluster]: network settings <======
helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:238: ======> post-mortem[TestMultiControlPlane/serial/StartCluster]: docker inspect <======
helpers_test.go:239: (dbg) Run: docker inspect ha-434755
helpers_test.go:243: (dbg) docker inspect ha-434755:
-- stdout --
[
{
"Id": "3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e",
"Created": "2025-09-19T22:24:25.435908216Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 203722,
"ExitCode": 0,
"Error": "",
"StartedAt": "2025-09-19T22:24:25.464542616Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c6b5532e987b5b4f5fc9cb0336e378ed49c0542bad8cbfc564b71e977a6269de",
"ResolvConfPath": "/var/lib/docker/containers/3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e/hostname",
"HostsPath": "/var/lib/docker/containers/3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e/hosts",
"LogPath": "/var/lib/docker/containers/3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e/3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e-json.log",
"Name": "/ha-434755",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"ha-434755:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "ha-434755",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "private",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": null,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "3c5829252b8b881f15f3c54c4ba70d1490c8ac9fbae20a31fdf9d65226d1379e",
"LowerDir": "/var/lib/docker/overlay2/fa8484ef68691db024ec039bfca147494e07d923a6d3b6608b222c7b12e4a90c-init/diff:/var/lib/docker/overlay2/9d2e369e5d97e1c9099e0626e9d6e97dbea1f066bb5f1a75d4701fbdb3248b63/diff",
"MergedDir": "/var/lib/docker/overlay2/fa8484ef68691db024ec039bfca147494e07d923a6d3b6608b222c7b12e4a90c/merged",
"UpperDir": "/var/lib/docker/overlay2/fa8484ef68691db024ec039bfca147494e07d923a6d3b6608b222c7b12e4a90c/diff",
"WorkDir": "/var/lib/docker/overlay2/fa8484ef68691db024ec039bfca147494e07d923a6d3b6608b222c7b12e4a90c/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "ha-434755",
"Source": "/var/lib/docker/volumes/ha-434755/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "ha-434755",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "ha-434755",
"name.minikube.sigs.k8s.io": "ha-434755",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "a0bf828a3209b8c3d2ad3e733e50f6df1f50e409f342a092c4c814dd4568d0ec",
"SandboxKey": "/var/run/docker/netns/a0bf828a3209",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32783"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32784"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32787"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32785"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32786"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"ha-434755": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "32:f7:72:52:e8:45",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "db70212208592ba3a09cb1094d6c6cf228f6e4f0d26c9a33f52f5ec9e3d42878",
"EndpointID": "b635e0cc6dc79a8f2eb8d44fbb74681cf1e5b405f36f7c9fa0b8f88a40d54eb0",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"ha-434755",
"3c5829252b8b"
]
}
}
}
}
]
-- /stdout --
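Note: the full docker inspect dump above can also be narrowed to individual fields with docker's -f/--format Go-template flag, which is the same mechanism the driver uses below to look up the published 22/tcp host port. A small illustrative Go wrapper around that CLI call (profile name and template are just the values from this run):
package main
import (
	"fmt"
	"os/exec"
	"strings"
)
// inspectField shells out to `docker container inspect -f <tmpl> <name>`
// and returns the rendered template output.
func inspectField(name, tmpl string) (string, error) {
	out, err := exec.Command("docker", "container", "inspect", "-f", tmpl, name).Output()
	if err != nil {
		return "", fmt.Errorf("docker inspect %s: %w", name, err)
	}
	return strings.TrimSpace(string(out)), nil
}
func main() {
	// Example: the host port mapped to the container's SSH port.
	port, err := inspectField("ha-434755", `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`)
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	fmt.Println("22/tcp is published on host port", port)
}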
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p ha-434755 -n ha-434755
helpers_test.go:252: <<< TestMultiControlPlane/serial/StartCluster FAILED: start of post-mortem logs <<<
helpers_test.go:253: ======> post-mortem[TestMultiControlPlane/serial/StartCluster]: minikube logs <======
helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p ha-434755 logs -n 25
helpers_test.go:260: TestMultiControlPlane/serial/StartCluster logs:
-- stdout --
==> Audit <==
┌────────────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├────────────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ cache │ functional-432755 cache reload │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ cache │ functional-432755 cache add minikube-local-cache-test:functional-432755 │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ cache │ functional-432755 cache delete minikube-local-cache-test:functional-432755 │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ ssh │ functional-432755 ssh sudo crictl inspecti registry.k8s.io/pause:latest │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ cache │ delete registry.k8s.io/pause:3.3 │ minikube │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ cache │ delete registry.k8s.io/pause:3.1 │ minikube │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ cache │ delete registry.k8s.io/pause:latest │ minikube │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ cache │ list │ minikube │ jenkins │ v1.37.0 │ 19 Sep 25 22:22 UTC │ 19 Sep 25 22:22 UTC │
│ service │ functional-432755 service --namespace=default --https --url hello-node │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ mount │ -p functional-432755 --kill=true │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ │
│ license │ │ minikube │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ service │ functional-432755 service hello-node --url --format={{.IP}} │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ update-context │ functional-432755 update-context --alsologtostderr -v=2 │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ update-context │ functional-432755 update-context --alsologtostderr -v=2 │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ update-context │ functional-432755 update-context --alsologtostderr -v=2 │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ image │ functional-432755 image ls --format short --alsologtostderr │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ service │ functional-432755 service hello-node --url │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ image │ functional-432755 image ls --format yaml --alsologtostderr │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ ssh │ functional-432755 ssh pgrep buildkitd │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ │
│ image │ functional-432755 image ls --format json --alsologtostderr │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ image │ functional-432755 image build -t localhost/my-image:functional-432755 testdata/build --alsologtostderr │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ image │ functional-432755 image ls --format table --alsologtostderr │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ image │ functional-432755 image ls │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ delete │ -p functional-432755 │ functional-432755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ 19 Sep 25 22:24 UTC │
│ start │ ha-434755 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker │ ha-434755 │ jenkins │ v1.37.0 │ 19 Sep 25 22:24 UTC │ │
└────────────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2025/09/19 22:24:21
Running on machine: ubuntu-20-agent-10
Binary: Built with gc go1.24.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0919 22:24:21.076123 203160 out.go:360] Setting OutFile to fd 1 ...
I0919 22:24:21.076224 203160 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0919 22:24:21.076232 203160 out.go:374] Setting ErrFile to fd 2...
I0919 22:24:21.076236 203160 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0919 22:24:21.076432 203160 root.go:338] Updating PATH: /home/jenkins/minikube-integration/21594-142711/.minikube/bin
I0919 22:24:21.076920 203160 out.go:368] Setting JSON to false
I0919 22:24:21.077711 203160 start.go:130] hostinfo: {"hostname":"ubuntu-20-agent-10","uptime":3997,"bootTime":1758316664,"procs":190,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"22.04","kernelVersion":"6.8.0-1037-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0919 22:24:21.077805 203160 start.go:140] virtualization: kvm guest
I0919 22:24:21.079564 203160 out.go:179] * [ha-434755] minikube v1.37.0 on Ubuntu 22.04 (kvm/amd64)
I0919 22:24:21.080690 203160 out.go:179] - MINIKUBE_LOCATION=21594
I0919 22:24:21.080699 203160 notify.go:220] Checking for updates...
I0919 22:24:21.081753 203160 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0919 22:24:21.082865 203160 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/21594-142711/kubeconfig
I0919 22:24:21.084034 203160 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/21594-142711/.minikube
I0919 22:24:21.085082 203160 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64
I0919 22:24:21.086101 203160 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0919 22:24:21.087230 203160 driver.go:421] Setting default libvirt URI to qemu:///system
I0919 22:24:21.110266 203160 docker.go:123] docker version: linux-28.4.0:Docker Engine - Community
I0919 22:24:21.110338 203160 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0919 22:24:21.164419 203160 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-19 22:24:21.153482571 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:
x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[
map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0919 22:24:21.164556 203160 docker.go:318] overlay module found
I0919 22:24:21.166256 203160 out.go:179] * Using the docker driver based on user configuration
I0919 22:24:21.167251 203160 start.go:304] selected driver: docker
I0919 22:24:21.167262 203160 start.go:918] validating driver "docker" against <nil>
I0919 22:24:21.167273 203160 start.go:929] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0919 22:24:21.167837 203160 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0919 22:24:21.218732 203160 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:24 OomKillDisable:false NGoroutines:46 SystemTime:2025-09-19 22:24:21.209383411 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.8.0-1037-gcp OperatingSystem:Ubuntu 22.04.5 LTS OSType:linux Architecture:
x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33652174848 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-10 Labels:[] ExperimentalBuild:false ServerVersion:28.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[
map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.28.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.39.4] map[Name:model Path:/usr/libexec/docker/cli-plugins/docker-model SchemaVersion:0.1.0 ShortDescription:Docker Model Runner (EXPERIMENTAL) Vendor:Docker Inc. Version:v0.1.39] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0919 22:24:21.218890 203160 start_flags.go:327] no existing cluster config was found, will generate one from the flags
I0919 22:24:21.219109 203160 start_flags.go:992] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:24:21.220600 203160 out.go:179] * Using Docker driver with root privileges
I0919 22:24:21.221617 203160 cni.go:84] Creating CNI manager for ""
I0919 22:24:21.221686 203160 cni.go:136] multinode detected (0 nodes found), recommending kindnet
I0919 22:24:21.221699 203160 start_flags.go:336] Found "CNI" CNI - setting NetworkPlugin=cni
I0919 22:24:21.221777 203160 start.go:348] cluster config:
{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin
:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:21.222962 203160 out.go:179] * Starting "ha-434755" primary control-plane node in "ha-434755" cluster
I0919 22:24:21.223920 203160 cache.go:123] Beginning downloading kic base image for docker with docker
I0919 22:24:21.224932 203160 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:24:21.225767 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:21.225807 203160 preload.go:146] Found local preload: /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4
I0919 22:24:21.225817 203160 cache.go:58] Caching tarball of preloaded images
I0919 22:24:21.225855 203160 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:24:21.225956 203160 preload.go:172] Found /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:24:21.225972 203160 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0919 22:24:21.226288 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:21.226314 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json: {Name:mkebfaf58402ee5b29f1d566a094ba67c667bd07 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:21.245058 203160 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:24:21.245075 203160 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:24:21.245090 203160 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:24:21.245116 203160 start.go:360] acquireMachinesLock for ha-434755: {Name:mkbee2b246a2c7257f14e13c0a2cc8098703a645 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:24:21.245221 203160 start.go:364] duration metric: took 85.831µs to acquireMachinesLock for "ha-434755"
I0919 22:24:21.245250 203160 start.go:93] Provisioning new machine with config: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APISer
verIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: Socket
VMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:21.245320 203160 start.go:125] createHost starting for "" (driver="docker")
I0919 22:24:21.246894 203160 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:24:21.247127 203160 start.go:159] libmachine.API.Create for "ha-434755" (driver="docker")
I0919 22:24:21.247160 203160 client.go:168] LocalClient.Create starting
I0919 22:24:21.247231 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem
I0919 22:24:21.247268 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:21.247320 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:21.247397 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem
I0919 22:24:21.247432 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:21.247449 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:21.247869 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0919 22:24:21.263071 203160 cli_runner.go:211] docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0919 22:24:21.263128 203160 network_create.go:284] running [docker network inspect ha-434755] to gather additional debugging logs...
I0919 22:24:21.263150 203160 cli_runner.go:164] Run: docker network inspect ha-434755
W0919 22:24:21.278228 203160 cli_runner.go:211] docker network inspect ha-434755 returned with exit code 1
I0919 22:24:21.278257 203160 network_create.go:287] error running [docker network inspect ha-434755]: docker network inspect ha-434755: exit status 1
stdout:
[]
stderr:
Error response from daemon: network ha-434755 not found
I0919 22:24:21.278276 203160 network_create.go:289] output of [docker network inspect ha-434755]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network ha-434755 not found
** /stderr **
I0919 22:24:21.278380 203160 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:21.293889 203160 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001a50f90}
I0919 22:24:21.293945 203160 network_create.go:124] attempt to create docker network ha-434755 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0919 22:24:21.293988 203160 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=ha-434755 ha-434755
I0919 22:24:21.346619 203160 network_create.go:108] docker network ha-434755 192.168.49.0/24 created
I0919 22:24:21.346647 203160 kic.go:121] calculated static IP "192.168.49.2" for the "ha-434755" container
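Note: the gateway .1 and the container's static .2 above are derived mechanically from the chosen /24. A short sketch of that arithmetic with the standard library (the helper below is illustrative, not minikube's network code):
package main
import (
	"fmt"
	"net/netip"
)
// hostAt returns the n-th address inside the prefix (n=1 -> gateway, n=2 -> first client).
func hostAt(p netip.Prefix, n int) netip.Addr {
	addr := p.Masked().Addr()
	for i := 0; i < n; i++ {
		addr = addr.Next()
	}
	return addr
}
func main() {
	subnet := netip.MustParsePrefix("192.168.49.0/24")
	fmt.Println("gateway:", hostAt(subnet, 1))      // 192.168.49.1
	fmt.Println("first client:", hostAt(subnet, 2)) // 192.168.49.2
}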
I0919 22:24:21.346698 203160 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:24:21.362122 203160 cli_runner.go:164] Run: docker volume create ha-434755 --label name.minikube.sigs.k8s.io=ha-434755 --label created_by.minikube.sigs.k8s.io=true
I0919 22:24:21.378481 203160 oci.go:103] Successfully created a docker volume ha-434755
I0919 22:24:21.378568 203160 cli_runner.go:164] Run: docker run --rm --name ha-434755-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755 --entrypoint /usr/bin/test -v ha-434755:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:24:21.725934 203160 oci.go:107] Successfully prepared a docker volume ha-434755
I0919 22:24:21.725988 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:21.726011 203160 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:24:21.726083 203160 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:24:25.368758 203160 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.642631223s)
I0919 22:24:25.368791 203160 kic.go:203] duration metric: took 3.642776622s to extract preloaded images to volume ...
W0919 22:24:25.368885 203160 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:24:25.368918 203160 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:24:25.368955 203160 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:24:25.420305 203160 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-434755 --name ha-434755 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-434755 --network ha-434755 --ip 192.168.49.2 --volume ha-434755:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:24:25.661250 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Running}}
I0919 22:24:25.679605 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:25.698105 203160 cli_runner.go:164] Run: docker exec ha-434755 stat /var/lib/dpkg/alternatives/iptables
I0919 22:24:25.750352 203160 oci.go:144] the created container "ha-434755" has a running status.
I0919 22:24:25.750385 203160 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa...
I0919 22:24:26.145646 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:24:26.145696 203160 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:24:26.169661 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:26.186378 203160 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:24:26.186402 203160 kic_runner.go:114] Args: [docker exec --privileged ha-434755 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:24:26.236428 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:26.253812 203160 machine.go:93] provisionDockerMachine start ...
I0919 22:24:26.253917 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:26.271856 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:26.272111 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:26.272123 203160 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:24:26.403852 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755
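Note: the "native" SSH client above dials 127.0.0.1:32783, the host port Docker published for the container's 22/tcp, and authenticates with the generated id_rsa. A rough equivalent with golang.org/x/crypto/ssh (path and port are the ones from this run; InsecureIgnoreHostKey only keeps the sketch short):
package main
import (
	"fmt"
	"os"
	"golang.org/x/crypto/ssh"
)
func main() {
	key, err := os.ReadFile("/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa")
	if err != nil {
		panic(err)
	}
	signer, err := ssh.ParsePrivateKey(key)
	if err != nil {
		panic(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "docker",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable only for a throwaway local test container
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:32783", cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()
	sess, err := client.NewSession()
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.CombinedOutput("hostname")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: ha-434755
}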
I0919 22:24:26.403887 203160 ubuntu.go:182] provisioning hostname "ha-434755"
I0919 22:24:26.403968 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:26.421146 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:26.421378 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:26.421391 203160 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-434755 && echo "ha-434755" | sudo tee /etc/hostname
I0919 22:24:26.565038 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755
I0919 22:24:26.565121 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:26.582234 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:26.582443 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:26.582460 203160 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-434755' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-434755/g' /etc/hosts;
else
echo '127.0.1.1 ha-434755' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:24:26.715045 203160 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:24:26.715078 203160 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-142711/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-142711/.minikube}
I0919 22:24:26.715105 203160 ubuntu.go:190] setting up certificates
I0919 22:24:26.715115 203160 provision.go:84] configureAuth start
I0919 22:24:26.715165 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755
I0919 22:24:26.732003 203160 provision.go:143] copyHostCerts
I0919 22:24:26.732039 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:26.732068 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem, removing ...
I0919 22:24:26.732077 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:26.732143 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem (1123 bytes)
I0919 22:24:26.732228 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:26.732246 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem, removing ...
I0919 22:24:26.732250 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:26.732275 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem (1675 bytes)
I0919 22:24:26.732321 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:26.732338 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem, removing ...
I0919 22:24:26.732344 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:26.732367 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem (1078 bytes)
I0919 22:24:26.732417 203160 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem org=jenkins.ha-434755 san=[127.0.0.1 192.168.49.2 ha-434755 localhost minikube]
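Note: the server cert generated above is an ordinary x509 certificate whose subject alternative names carry the listed IPs and hostnames, signed with the profile's ca.pem/ca-key.pem pair. A condensed illustration of those SAN fields with crypto/x509 (self-signed here purely to stay short; the real flow signs with the CA key):
package main
import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)
func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.ha-434755"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(26280 * time.Hour), // matches CertExpiration in the cluster config above
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// The SAN list from the log line: IPs plus hostnames.
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.49.2")},
		DNSNames:    []string{"ha-434755", "localhost", "minikube"},
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}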
I0919 22:24:27.341034 203160 provision.go:177] copyRemoteCerts
I0919 22:24:27.341097 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:24:27.341134 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.360598 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:27.455483 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:24:27.455564 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0919 22:24:27.480468 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:24:27.480525 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I0919 22:24:27.503241 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:24:27.503287 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0919 22:24:27.525743 203160 provision.go:87] duration metric: took 810.613663ms to configureAuth
I0919 22:24:27.525768 203160 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:24:27.525921 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:27.525973 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.542866 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:27.543066 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:27.543078 203160 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0919 22:24:27.675714 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0919 22:24:27.675740 203160 ubuntu.go:71] root file system type: overlay
I0919 22:24:27.675838 203160 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0919 22:24:27.675893 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.693429 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:27.693693 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:27.693798 203160 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0919 22:24:27.843188 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0919 22:24:27.843285 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:27.860458 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:27.860715 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32783 <nil> <nil>}
I0919 22:24:27.860742 203160 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0919 22:24:28.937239 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-19 22:24:27.840752975 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
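The diff applied above hinges on a systemd rule worth noting: a unit (or drop-in) that redefines ExecStart= for a non-oneshot service must first emit an empty ExecStart= line to clear the inherited command, otherwise systemd rejects it with "Service has more than one ExecStart= setting". A minimal sketch of generating such an override in Go; the argument list and output path are illustrative, this is not minikube's template code:

package main

import (
	"fmt"
	"os"
)

// dockerdOverride renders a [Service] drop-in body. Illustrative only.
func dockerdOverride(extraArgs []string) string {
	cmd := "/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock"
	for _, a := range extraArgs {
		cmd += " " + a
	}
	return "[Service]\n" +
		// An empty ExecStart= clears the command inherited from the base unit;
		// without it systemd reports "more than one ExecStart= setting".
		"ExecStart=\n" +
		"ExecStart=" + cmd + "\n"
}

func main() {
	unit := dockerdOverride([]string{"--default-ulimit=nofile=1048576:1048576"})
	// A real override would be written under /etc/systemd/system/docker.service.d/
	// and followed by `systemctl daemon-reload && systemctl restart docker`.
	if err := os.WriteFile("override.conf", []byte(unit), 0o644); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Print(unit)
}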
I0919 22:24:28.937277 203160 machine.go:96] duration metric: took 2.683443018s to provisionDockerMachine
I0919 22:24:28.937292 203160 client.go:171] duration metric: took 7.690121191s to LocalClient.Create
I0919 22:24:28.937318 203160 start.go:167] duration metric: took 7.690191518s to libmachine.API.Create "ha-434755"
I0919 22:24:28.937332 203160 start.go:293] postStartSetup for "ha-434755" (driver="docker")
I0919 22:24:28.937346 203160 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:24:28.937417 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:24:28.937468 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:28.955631 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.052278 203160 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:24:29.055474 203160 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:24:29.055519 203160 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:24:29.055533 203160 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:24:29.055541 203160 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:24:29.055555 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/addons for local assets ...
I0919 22:24:29.055607 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/files for local assets ...
I0919 22:24:29.055697 203160 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> 1463352.pem in /etc/ssl/certs
I0919 22:24:29.055708 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /etc/ssl/certs/1463352.pem
I0919 22:24:29.055792 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:24:29.064211 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:29.088887 203160 start.go:296] duration metric: took 151.540336ms for postStartSetup
I0919 22:24:29.089170 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755
I0919 22:24:29.106927 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:29.107156 203160 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:24:29.107207 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:29.123683 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.214129 203160 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:24:29.218338 203160 start.go:128] duration metric: took 7.973004208s to createHost
I0919 22:24:29.218360 203160 start.go:83] releasing machines lock for "ha-434755", held for 7.973124739s
I0919 22:24:29.218412 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755
I0919 22:24:29.236040 203160 ssh_runner.go:195] Run: cat /version.json
I0919 22:24:29.236081 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:29.236126 203160 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:24:29.236195 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:29.253449 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.253827 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:29.414344 203160 ssh_runner.go:195] Run: systemctl --version
I0919 22:24:29.418771 203160 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:24:29.423119 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:24:29.450494 203160 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:24:29.450577 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:24:29.475768 203160 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:24:29.475797 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:29.475832 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:29.475949 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:29.491395 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:24:29.501756 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:24:29.511013 203160 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:24:29.511066 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:24:29.520269 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:29.529232 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:24:29.538263 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:29.547175 203160 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:24:29.555699 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:24:29.564644 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:24:29.573613 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:24:29.582664 203160 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:24:29.590362 203160 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:24:29.598040 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:29.662901 203160 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0919 22:24:29.737694 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:29.737750 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:29.737804 203160 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0919 22:24:29.750261 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:29.761088 203160 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0919 22:24:29.781368 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:29.792667 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:24:29.803679 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:29.819981 203160 ssh_runner.go:195] Run: which cri-dockerd
I0919 22:24:29.823528 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0919 22:24:29.833551 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0919 22:24:29.851373 203160 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0919 22:24:29.919426 203160 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0919 22:24:29.982907 203160 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0919 22:24:29.983042 203160 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
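The 129-byte /etc/docker/daemon.json pushed here is what switches dockerd to the systemd cgroup driver detected on the host. Its exact contents are not shown in the log; a plausible minimal equivalent uses Docker's standard exec-opts key, sketched below (the real file may carry additional keys):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Minimal daemon.json pinning the cgroup driver to systemd.
	// "exec-opts" with native.cgroupdriver is standard dockerd configuration.
	cfg := map[string]any{
		"exec-opts": []string{"native.cgroupdriver=systemd"},
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(string(out))
}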
I0919 22:24:30.001192 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0919 22:24:30.012142 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:30.077304 203160 ssh_runner.go:195] Run: sudo systemctl restart docker
I0919 22:24:30.841187 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:24:30.852558 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0919 22:24:30.863819 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:30.874629 203160 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0919 22:24:30.936849 203160 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0919 22:24:30.998282 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:31.059613 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0919 22:24:31.085894 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0919 22:24:31.097613 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:31.165516 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0919 22:24:31.237651 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:31.250126 203160 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0919 22:24:31.250193 203160 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0919 22:24:31.253768 203160 start.go:563] Will wait 60s for crictl version
I0919 22:24:31.253815 203160 ssh_runner.go:195] Run: which crictl
I0919 22:24:31.257175 203160 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:24:31.291330 203160 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0919 22:24:31.291400 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:31.316224 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:31.343571 203160 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0919 22:24:31.343639 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:31.360312 203160 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:24:31.364394 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
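The one-liner above makes the host.minikube.internal mapping idempotent: it filters out any existing entry for that name before appending the fresh one, so repeated starts never accumulate duplicates. A rough Go equivalent of the same filter-then-append logic (IP and name taken from the log; this is not minikube's implementation):

package main

import (
	"fmt"
	"os"
	"strings"
)

// upsertHost drops any existing line whose second field is name, then appends
// a fresh "ip<TAB>name" mapping, mirroring the grep -v + echo pipeline above.
func upsertHost(hosts, ip, name string) string {
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(hosts, "\n"), "\n") {
		if fields := strings.Fields(line); len(fields) >= 2 && fields[1] == name {
			continue // stale entry for this name
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+name)
	return strings.Join(kept, "\n") + "\n"
}

func main() {
	data, err := os.ReadFile("/etc/hosts")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Printed rather than written back; the real flow copies the result via sudo.
	fmt.Print(upsertHost(string(data), "192.168.49.1", "host.minikube.internal"))
}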
I0919 22:24:31.376325 203160 kubeadm.go:875] updating cluster {Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIP
s:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath:
SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0919 22:24:31.376429 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:31.376472 203160 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0919 22:24:31.396685 203160 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0919 22:24:31.396706 203160 docker.go:621] Images already preloaded, skipping extraction
I0919 22:24:31.396777 203160 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0919 22:24:31.417311 203160 docker.go:691] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.34.0
registry.k8s.io/kube-scheduler:v1.34.0
registry.k8s.io/kube-controller-manager:v1.34.0
registry.k8s.io/kube-proxy:v1.34.0
registry.k8s.io/etcd:3.6.4-0
registry.k8s.io/pause:3.10.1
registry.k8s.io/coredns/coredns:v1.12.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0919 22:24:31.417334 203160 cache_images.go:85] Images are preloaded, skipping loading
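The two `docker images --format {{.Repository}}:{{.Tag}}` runs above implement the preload short-circuit: if every image the preload tarball would provide is already present, extraction is skipped. A simplified re-implementation of that comparison (expected list abbreviated from the log output; not minikube's own code):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	have := map[string]bool{}
	for _, img := range strings.Fields(string(out)) {
		have[img] = true
	}
	// Expected set copied (abbreviated) from the preloaded-images listing above.
	expected := []string{
		"registry.k8s.io/kube-apiserver:v1.34.0",
		"registry.k8s.io/etcd:3.6.4-0",
		"registry.k8s.io/pause:3.10.1",
		"gcr.io/k8s-minikube/storage-provisioner:v5",
	}
	missing := 0
	for _, img := range expected {
		if !have[img] {
			fmt.Println("missing:", img)
			missing++
		}
	}
	if missing == 0 {
		fmt.Println("images already preloaded, extraction can be skipped")
	}
}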
I0919 22:24:31.417348 203160 kubeadm.go:926] updating node { 192.168.49.2 8443 v1.34.0 docker true true} ...
I0919 22:24:31.417454 203160 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-434755 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:24:31.417533 203160 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0919 22:24:31.468906 203160 cni.go:84] Creating CNI manager for ""
I0919 22:24:31.468934 203160 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0919 22:24:31.468949 203160 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0919 22:24:31.468980 203160 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.34.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-434755 NodeName:ha-434755 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/man
ifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0919 22:24:31.469131 203160 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "ha-434755"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.34.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0919 22:24:31.469170 203160 kube-vip.go:115] generating kube-vip config ...
I0919 22:24:31.469222 203160 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:24:31.481888 203160 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:24:31.481979 203160 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/super-admin.conf"
name: kubeconfig
status: {}
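The generated static pod above runs kube-vip with leader election (vip_leaderelection=true, lease plndr-cp-lock), so the 192.168.49.254 API VIP is owned by exactly one control-plane node at a time; ipvs-based load-balancing was skipped earlier because the ip_vs kernel modules were unavailable. To see which node currently holds the VIP once the cluster is up, one can query the Lease, assuming kubectl and a working kubeconfig (illustrative, not part of the test):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Lease name and namespace come from the kube-vip config above.
	out, err := exec.Command("kubectl", "-n", "kube-system",
		"get", "lease", "plndr-cp-lock",
		"-o", "jsonpath={.spec.holderIdentity}").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("VIP 192.168.49.254 currently held by: %s\n", out)
}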
I0919 22:24:31.482024 203160 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:24:31.490896 203160 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:24:31.490954 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I0919 22:24:31.499752 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (308 bytes)
I0919 22:24:31.517642 203160 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:24:31.535661 203160 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2209 bytes)
I0919 22:24:31.552926 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1364 bytes)
I0919 22:24:31.572177 203160 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:24:31.575892 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:31.587094 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:31.654039 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:31.678017 203160 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755 for IP: 192.168.49.2
I0919 22:24:31.678046 203160 certs.go:194] generating shared ca certs ...
I0919 22:24:31.678070 203160 certs.go:226] acquiring lock for ca certs: {Name:mkc5df652d6204fd8687dfaaf83b02c6e10b58b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:31.678228 203160 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key
I0919 22:24:31.678271 203160 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key
I0919 22:24:31.678281 203160 certs.go:256] generating profile certs ...
I0919 22:24:31.678337 203160 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key
I0919 22:24:31.678354 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt with IP's: []
I0919 22:24:31.857665 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt ...
I0919 22:24:31.857696 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt: {Name:mk7ec51226de11d757f14966ffd43a2037698787 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:31.857881 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key ...
I0919 22:24:31.857892 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key: {Name:mkf584fffef919693714a07e5a88b44eca7219c8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:31.857971 203160 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8
I0919 22:24:31.857986 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.254]
I0919 22:24:32.133506 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8 ...
I0919 22:24:32.133540 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8: {Name:mkb81ce84ef58bc410b7449c932fc5a925016309 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.133711 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8 ...
I0919 22:24:32.133729 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8: {Name:mk079553ff6e398f68775f47e1ad8c0a1a64a140 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.133803 203160 certs.go:381] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.9c8d1cb8 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt
I0919 22:24:32.133908 203160 certs.go:385] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.9c8d1cb8 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key
I0919 22:24:32.133973 203160 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key
I0919 22:24:32.133989 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt with IP's: []
I0919 22:24:32.385885 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt ...
I0919 22:24:32.385919 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt: {Name:mk3bec5b301362978b2b3b81fd3c21d3f704e1cb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.386084 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key ...
I0919 22:24:32.386097 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key: {Name:mk9670132fab0c6814f19a454e4e08b86e71aeae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:32.386174 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:24:32.386207 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:24:32.386221 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:24:32.386234 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:24:32.386246 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:24:32.386271 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:24:32.386283 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:24:32.386292 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:24:32.386341 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem (1338 bytes)
W0919 22:24:32.386378 203160 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335_empty.pem, impossibly tiny 0 bytes
I0919 22:24:32.386388 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem (1675 bytes)
I0919 22:24:32.386418 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem (1078 bytes)
I0919 22:24:32.386443 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem (1123 bytes)
I0919 22:24:32.386467 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem (1675 bytes)
I0919 22:24:32.386517 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:32.386548 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.386562 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.386574 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem -> /usr/share/ca-certificates/146335.pem
I0919 22:24:32.387195 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:24:32.413179 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:24:32.437860 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:24:32.462719 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:24:32.488640 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0919 22:24:32.513281 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0919 22:24:32.536826 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:24:32.559540 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:24:32.582215 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /usr/share/ca-certificates/1463352.pem (1708 bytes)
I0919 22:24:32.607378 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:24:32.629686 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem --> /usr/share/ca-certificates/146335.pem (1338 bytes)
I0919 22:24:32.651946 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0919 22:24:32.668687 203160 ssh_runner.go:195] Run: openssl version
I0919 22:24:32.673943 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:24:32.683156 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.686577 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.686633 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:32.693223 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:24:32.702177 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/146335.pem && ln -fs /usr/share/ca-certificates/146335.pem /etc/ssl/certs/146335.pem"
I0919 22:24:32.711521 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/146335.pem
I0919 22:24:32.714732 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/146335.pem
I0919 22:24:32.714766 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/146335.pem
I0919 22:24:32.721219 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/146335.pem /etc/ssl/certs/51391683.0"
I0919 22:24:32.730116 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1463352.pem && ln -fs /usr/share/ca-certificates/1463352.pem /etc/ssl/certs/1463352.pem"
I0919 22:24:32.739018 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.742287 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.742330 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1463352.pem
I0919 22:24:32.748703 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/1463352.pem /etc/ssl/certs/3ec20f2e.0"
I0919 22:24:32.757370 203160 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:24:32.760542 203160 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:24:32.760590 203160 kubeadm.go:392] StartCluster: {Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[
] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: So
cketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:32.760710 203160 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0919 22:24:32.778911 203160 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0919 22:24:32.787673 203160 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0919 22:24:32.796245 203160 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0919 22:24:32.796280 203160 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0919 22:24:32.804896 203160 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0919 22:24:32.804909 203160 kubeadm.go:157] found existing configuration files:
I0919 22:24:32.804937 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0919 22:24:32.813189 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0919 22:24:32.813229 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0919 22:24:32.821160 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0919 22:24:32.829194 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0919 22:24:32.829245 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0919 22:24:32.837031 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0919 22:24:32.845106 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0919 22:24:32.845150 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0919 22:24:32.853133 203160 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0919 22:24:32.861349 203160 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0919 22:24:32.861390 203160 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0919 22:24:32.869355 203160 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0919 22:24:32.905932 203160 kubeadm.go:310] [init] Using Kubernetes version: v1.34.0
I0919 22:24:32.906264 203160 kubeadm.go:310] [preflight] Running pre-flight checks
I0919 22:24:32.922979 203160 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0919 22:24:32.923110 203160 kubeadm.go:310] KERNEL_VERSION: 6.8.0-1037-gcp
I0919 22:24:32.923168 203160 kubeadm.go:310] OS: Linux
I0919 22:24:32.923231 203160 kubeadm.go:310] CGROUPS_CPU: enabled
I0919 22:24:32.923291 203160 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0919 22:24:32.923361 203160 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0919 22:24:32.923426 203160 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0919 22:24:32.923486 203160 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0919 22:24:32.923570 203160 kubeadm.go:310] CGROUPS_PIDS: enabled
I0919 22:24:32.923633 203160 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0919 22:24:32.923686 203160 kubeadm.go:310] CGROUPS_IO: enabled
I0919 22:24:32.975656 203160 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0919 22:24:32.975772 203160 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0919 22:24:32.975923 203160 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0919 22:24:32.987123 203160 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0919 22:24:32.990614 203160 out.go:252] - Generating certificates and keys ...
I0919 22:24:32.990701 203160 kubeadm.go:310] [certs] Using existing ca certificate authority
I0919 22:24:32.990790 203160 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0919 22:24:33.305563 203160 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0919 22:24:33.403579 203160 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0919 22:24:33.794985 203160 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0919 22:24:33.939882 203160 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0919 22:24:34.319905 203160 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0919 22:24:34.320050 203160 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [ha-434755 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0919 22:24:34.571803 203160 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0919 22:24:34.572036 203160 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [ha-434755 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0919 22:24:34.785683 203160 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0919 22:24:34.913179 203160 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0919 22:24:35.193757 203160 kubeadm.go:310] [certs] Generating "sa" key and public key
I0919 22:24:35.193908 203160 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0919 22:24:35.269921 203160 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0919 22:24:35.432895 203160 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0919 22:24:35.889148 203160 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0919 22:24:36.099682 203160 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0919 22:24:36.370632 203160 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0919 22:24:36.371101 203160 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0919 22:24:36.373221 203160 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0919 22:24:36.375010 203160 out.go:252] - Booting up control plane ...
I0919 22:24:36.375112 203160 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0919 22:24:36.375205 203160 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0919 22:24:36.375823 203160 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0919 22:24:36.385552 203160 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0919 22:24:36.385660 203160 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0919 22:24:36.391155 203160 kubeadm.go:310] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0919 22:24:36.391446 203160 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0919 22:24:36.391516 203160 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0919 22:24:36.469169 203160 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0919 22:24:36.469341 203160 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0919 22:24:37.470960 203160 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001771868s
I0919 22:24:37.475271 203160 kubeadm.go:310] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s
I0919 22:24:37.475402 203160 kubeadm.go:310] [control-plane-check] Checking kube-apiserver at https://192.168.49.2:8443/livez
I0919 22:24:37.475560 203160 kubeadm.go:310] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz
I0919 22:24:37.475683 203160 kubeadm.go:310] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez
I0919 22:24:38.691996 203160 kubeadm.go:310] [control-plane-check] kube-controller-manager is healthy after 1.216651105s
I0919 22:24:39.748252 203160 kubeadm.go:310] [control-plane-check] kube-scheduler is healthy after 2.272903249s
I0919 22:24:43.641652 203160 kubeadm.go:310] [control-plane-check] kube-apiserver is healthy after 6.166322635s
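The kubelet-check and control-plane-check phases above are simple HTTP(S) polls against well-known health endpoints (kubelet healthz on 127.0.0.1:10248, apiserver livez on 192.168.49.2:8443, and so on) until they return 200. A toy poller for the kubelet endpoint, with an illustrative 30-second deadline rather than kubeadm's actual logic:

package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	client := &http.Client{Timeout: 2 * time.Second}
	deadline := time.Now().Add(30 * time.Second)
	for time.Now().Before(deadline) {
		// URL taken from the [kubelet-check] line in the log above.
		resp, err := client.Get("http://127.0.0.1:10248/healthz")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("kubelet is healthy")
				return
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Fprintln(os.Stderr, "kubelet did not become healthy in time")
	os.Exit(1)
}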
I0919 22:24:43.652285 203160 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0919 22:24:43.662136 203160 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0919 22:24:43.670817 203160 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0919 22:24:43.671109 203160 kubeadm.go:310] [mark-control-plane] Marking the node ha-434755 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0919 22:24:43.678157 203160 kubeadm.go:310] [bootstrap-token] Using token: g87idd.cyuzs8jougdixinx
I0919 22:24:43.679741 203160 out.go:252] - Configuring RBAC rules ...
I0919 22:24:43.679886 203160 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0919 22:24:43.685914 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0919 22:24:43.691061 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0919 22:24:43.693550 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0919 22:24:43.697628 203160 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0919 22:24:43.699973 203160 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0919 22:24:44.047466 203160 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0919 22:24:44.461485 203160 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0919 22:24:45.047812 203160 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0919 22:24:45.048594 203160 kubeadm.go:310]
I0919 22:24:45.048685 203160 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0919 22:24:45.048725 203160 kubeadm.go:310]
I0919 22:24:45.048861 203160 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0919 22:24:45.048871 203160 kubeadm.go:310]
I0919 22:24:45.048906 203160 kubeadm.go:310] mkdir -p $HOME/.kube
I0919 22:24:45.049005 203160 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0919 22:24:45.049058 203160 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0919 22:24:45.049064 203160 kubeadm.go:310]
I0919 22:24:45.049110 203160 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0919 22:24:45.049131 203160 kubeadm.go:310]
I0919 22:24:45.049219 203160 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0919 22:24:45.049232 203160 kubeadm.go:310]
I0919 22:24:45.049278 203160 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0919 22:24:45.049339 203160 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0919 22:24:45.049394 203160 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0919 22:24:45.049400 203160 kubeadm.go:310]
I0919 22:24:45.049474 203160 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0919 22:24:45.049614 203160 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0919 22:24:45.049627 203160 kubeadm.go:310]
I0919 22:24:45.049721 203160 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token g87idd.cyuzs8jougdixinx \
I0919 22:24:45.049859 203160 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a \
I0919 22:24:45.049895 203160 kubeadm.go:310] --control-plane
I0919 22:24:45.049904 203160 kubeadm.go:310]
I0919 22:24:45.050015 203160 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0919 22:24:45.050028 203160 kubeadm.go:310]
I0919 22:24:45.050110 203160 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token g87idd.cyuzs8jougdixinx \
I0919 22:24:45.050212 203160 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a
I0919 22:24:45.053328 203160 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/6.8.0-1037-gcp\n", err: exit status 1
I0919 22:24:45.053440 203160 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
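The --discovery-token-ca-cert-hash printed in the join commands above is "sha256:" followed by the SHA-256 digest of the cluster CA certificate's DER-encoded Subject Public Key Info, which is how joining nodes pin the CA. A small sketch that recomputes it from the CA certificate used here (the path matches the certificatesDir in the kubeadm config; the rest is illustrative):

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// CA path matches certificatesDir from the kubeadm config above.
	data, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block in ca.crt")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Hash the DER-encoded SubjectPublicKeyInfo of the CA public key.
	spki, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("sha256:%x\n", sha256.Sum256(spki))
}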
I0919 22:24:45.053459 203160 cni.go:84] Creating CNI manager for ""
I0919 22:24:45.053466 203160 cni.go:136] multinode detected (1 nodes found), recommending kindnet
I0919 22:24:45.054970 203160 out.go:179] * Configuring CNI (Container Networking Interface) ...
I0919 22:24:45.056059 203160 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0919 22:24:45.060192 203160 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.34.0/kubectl ...
I0919 22:24:45.060207 203160 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I0919 22:24:45.078671 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0919 22:24:45.281468 203160 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0919 22:24:45.281585 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:45.281587 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-434755 minikube.k8s.io/updated_at=2025_09_19T22_24_45_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-434755 minikube.k8s.io/primary=true
I0919 22:24:45.374035 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:45.378242 203160 ops.go:34] apiserver oom_adj: -16
I0919 22:24:45.874252 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:46.375078 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:46.874791 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0919 22:24:46.939251 203160 kubeadm.go:1105] duration metric: took 1.657752945s to wait for elevateKubeSystemPrivileges
I0919 22:24:46.939292 203160 kubeadm.go:394] duration metric: took 14.17870588s to StartCluster
I0919 22:24:46.939313 203160 settings.go:142] acquiring lock: {Name:mk0ff94a55db11c0f045ab7f983bc46c653527ba Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:46.939381 203160 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/21594-142711/kubeconfig
I0919 22:24:46.940075 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/kubeconfig: {Name:mk4ed26fa289682c072e02c721ecb5e9a371ed27 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:46.940315 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0919 22:24:46.940328 203160 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:46.940349 203160 start.go:241] waiting for startup goroutines ...
I0919 22:24:46.940375 203160 addons.go:511] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0919 22:24:46.940455 203160 addons.go:69] Setting storage-provisioner=true in profile "ha-434755"
I0919 22:24:46.940480 203160 addons.go:69] Setting default-storageclass=true in profile "ha-434755"
I0919 22:24:46.940526 203160 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "ha-434755"
I0919 22:24:46.940484 203160 addons.go:238] Setting addon storage-provisioner=true in "ha-434755"
I0919 22:24:46.940592 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:46.940622 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:46.940889 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:46.941141 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:46.961198 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0919 22:24:46.961822 203160 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false
I0919 22:24:46.961843 203160 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false
I0919 22:24:46.961849 203160 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I0919 22:24:46.961854 203160 envvar.go:172] "Feature gate default state" feature="InOrderInformers" enabled=true
I0919 22:24:46.961858 203160 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I0919 22:24:46.961927 203160 cert_rotation.go:141] "Starting client certificate rotation controller" logger="tls-transport-cache"
I0919 22:24:46.962245 203160 addons.go:238] Setting addon default-storageclass=true in "ha-434755"
I0919 22:24:46.962289 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:46.962659 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:46.962840 203160 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0919 22:24:46.964064 203160 addons.go:435] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0919 22:24:46.964085 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0919 22:24:46.964143 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:46.980987 203160 addons.go:435] installing /etc/kubernetes/addons/storageclass.yaml
I0919 22:24:46.981012 203160 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0919 22:24:46.981083 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:46.985677 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:46.998945 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:47.020097 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0919 22:24:47.098011 203160 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0919 22:24:47.110913 203160 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0919 22:24:47.173952 203160 start.go:976] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0919 22:24:47.362290 203160 out.go:179] * Enabled addons: storage-provisioner, default-storageclass
I0919 22:24:47.363580 203160 addons.go:514] duration metric: took 423.211287ms for enable addons: enabled=[storage-provisioner default-storageclass]
I0919 22:24:47.363630 203160 start.go:246] waiting for cluster config update ...
I0919 22:24:47.363647 203160 start.go:255] writing updated cluster config ...
I0919 22:24:47.364969 203160 out.go:203]
I0919 22:24:47.366064 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:47.366127 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:47.367471 203160 out.go:179] * Starting "ha-434755-m02" control-plane node in "ha-434755" cluster
I0919 22:24:47.368387 203160 cache.go:123] Beginning downloading kic base image for docker with docker
I0919 22:24:47.369440 203160 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:24:47.370378 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:47.370397 203160 cache.go:58] Caching tarball of preloaded images
I0919 22:24:47.370461 203160 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:24:47.370513 203160 preload.go:172] Found /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:24:47.370529 203160 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0919 22:24:47.370620 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:47.391559 203160 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:24:47.391581 203160 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:24:47.391603 203160 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:24:47.391635 203160 start.go:360] acquireMachinesLock for ha-434755-m02: {Name:mk9ca5ab09eecc208a09b7d4c6860cdbcbbd1861 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:24:47.391801 203160 start.go:364] duration metric: took 141.515µs to acquireMachinesLock for "ha-434755-m02"
I0919 22:24:47.391835 203160 start.go:93] Provisioning new machine with config: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m02 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:47.391926 203160 start.go:125] createHost starting for "m02" (driver="docker")
I0919 22:24:47.393797 203160 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:24:47.393909 203160 start.go:159] libmachine.API.Create for "ha-434755" (driver="docker")
I0919 22:24:47.393934 203160 client.go:168] LocalClient.Create starting
I0919 22:24:47.393999 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem
I0919 22:24:47.394037 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:47.394072 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:47.394137 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem
I0919 22:24:47.394163 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:24:47.394178 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:24:47.394368 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:47.411751 203160 network_create.go:77] Found existing network {name:ha-434755 subnet:0xc0016fd680 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0919 22:24:47.411805 203160 kic.go:121] calculated static IP "192.168.49.3" for the "ha-434755-m02" container
I0919 22:24:47.411877 203160 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:24:47.428826 203160 cli_runner.go:164] Run: docker volume create ha-434755-m02 --label name.minikube.sigs.k8s.io=ha-434755-m02 --label created_by.minikube.sigs.k8s.io=true
I0919 22:24:47.446551 203160 oci.go:103] Successfully created a docker volume ha-434755-m02
I0919 22:24:47.446629 203160 cli_runner.go:164] Run: docker run --rm --name ha-434755-m02-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m02 --entrypoint /usr/bin/test -v ha-434755-m02:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:24:47.837811 203160 oci.go:107] Successfully prepared a docker volume ha-434755-m02
I0919 22:24:47.837861 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:24:47.837884 203160 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:24:47.837943 203160 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:24:51.165942 203160 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m02:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (3.327954443s)
I0919 22:24:51.165985 203160 kic.go:203] duration metric: took 3.328094858s to extract preloaded images to volume ...
W0919 22:24:51.166081 203160 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:24:51.166111 203160 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:24:51.166151 203160 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:24:51.222283 203160 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-434755-m02 --name ha-434755-m02 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m02 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-434755-m02 --network ha-434755 --ip 192.168.49.3 --volume ha-434755-m02:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:24:51.469867 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Running}}
I0919 22:24:51.487954 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Status}}
I0919 22:24:51.506846 203160 cli_runner.go:164] Run: docker exec ha-434755-m02 stat /var/lib/dpkg/alternatives/iptables
I0919 22:24:51.559220 203160 oci.go:144] the created container "ha-434755-m02" has a running status.
I0919 22:24:51.559254 203160 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa...
I0919 22:24:51.766973 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:24:51.767017 203160 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:24:51.797620 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Status}}
I0919 22:24:51.823671 203160 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:24:51.823693 203160 kic_runner.go:114] Args: [docker exec --privileged ha-434755-m02 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:24:51.878635 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m02 --format={{.State.Status}}
I0919 22:24:51.902762 203160 machine.go:93] provisionDockerMachine start ...
I0919 22:24:51.902873 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:51.926268 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:51.926707 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:51.926729 203160 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:24:52.076154 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m02
I0919 22:24:52.076188 203160 ubuntu.go:182] provisioning hostname "ha-434755-m02"
I0919 22:24:52.076259 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:52.099415 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:52.099841 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:52.099873 203160 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-434755-m02 && echo "ha-434755-m02" | sudo tee /etc/hostname
I0919 22:24:52.261548 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m02
I0919 22:24:52.261646 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:52.283406 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:52.283734 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:52.283754 203160 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-434755-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-434755-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-434755-m02' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:24:52.428353 203160 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:24:52.428390 203160 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-142711/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-142711/.minikube}
I0919 22:24:52.428420 203160 ubuntu.go:190] setting up certificates
I0919 22:24:52.428441 203160 provision.go:84] configureAuth start
I0919 22:24:52.428536 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m02
I0919 22:24:52.450885 203160 provision.go:143] copyHostCerts
I0919 22:24:52.450924 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:52.450961 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem, removing ...
I0919 22:24:52.450971 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:24:52.451027 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem (1078 bytes)
I0919 22:24:52.451115 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:52.451140 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem, removing ...
I0919 22:24:52.451145 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:24:52.451185 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem (1123 bytes)
I0919 22:24:52.451248 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:52.451272 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem, removing ...
I0919 22:24:52.451276 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:24:52.451301 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem (1675 bytes)
I0919 22:24:52.451355 203160 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem org=jenkins.ha-434755-m02 san=[127.0.0.1 192.168.49.3 ha-434755-m02 localhost minikube]
I0919 22:24:52.822893 203160 provision.go:177] copyRemoteCerts
I0919 22:24:52.822975 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:24:52.823015 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:52.844478 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:52.949460 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:24:52.949550 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0919 22:24:52.985521 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:24:52.985590 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0919 22:24:53.015276 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:24:53.015359 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0919 22:24:53.043799 203160 provision.go:87] duration metric: took 615.336421ms to configureAuth
I0919 22:24:53.043834 203160 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:24:53.044042 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:53.044098 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:53.065294 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:53.065671 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:53.065691 203160 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0919 22:24:53.203158 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0919 22:24:53.203193 203160 ubuntu.go:71] root file system type: overlay
I0919 22:24:53.203308 203160 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0919 22:24:53.203367 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:53.220915 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:53.221235 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:53.221346 203160 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment="NO_PROXY=192.168.49.2"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0919 22:24:53.374632 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment=NO_PROXY=192.168.49.2
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0919 22:24:53.374713 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:53.392460 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:24:53.392706 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32788 <nil> <nil>}
I0919 22:24:53.392731 203160 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0919 22:24:54.550785 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-19 22:24:53.372388319 +0000
@@ -9,23 +9,35 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+Environment=NO_PROXY=192.168.49.2
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0919 22:24:54.550828 203160 machine.go:96] duration metric: took 2.648042096s to provisionDockerMachine
I0919 22:24:54.550847 203160 client.go:171] duration metric: took 7.156901293s to LocalClient.Create
I0919 22:24:54.550877 203160 start.go:167] duration metric: took 7.156965929s to libmachine.API.Create "ha-434755"
I0919 22:24:54.550892 203160 start.go:293] postStartSetup for "ha-434755-m02" (driver="docker")
I0919 22:24:54.550905 203160 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:24:54.550979 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:24:54.551047 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.573731 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:54.676450 203160 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:24:54.680626 203160 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:24:54.680660 203160 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:24:54.680669 203160 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:24:54.680678 203160 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:24:54.680695 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/addons for local assets ...
I0919 22:24:54.680757 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/files for local assets ...
I0919 22:24:54.680849 203160 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> 1463352.pem in /etc/ssl/certs
I0919 22:24:54.680863 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /etc/ssl/certs/1463352.pem
I0919 22:24:54.680970 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:24:54.691341 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:54.722119 203160 start.go:296] duration metric: took 171.208879ms for postStartSetup
I0919 22:24:54.722583 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m02
I0919 22:24:54.743611 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:24:54.743848 203160 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:24:54.743887 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.765985 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:54.864692 203160 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:24:54.870738 203160 start.go:128] duration metric: took 7.478790821s to createHost
I0919 22:24:54.870767 203160 start.go:83] releasing machines lock for "ha-434755-m02", held for 7.478950053s
I0919 22:24:54.870847 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m02
I0919 22:24:54.898999 203160 out.go:179] * Found network options:
I0919 22:24:54.900212 203160 out.go:179] - NO_PROXY=192.168.49.2
W0919 22:24:54.901275 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:24:54.901331 203160 proxy.go:120] fail to check proxy env: Error ip not in block
I0919 22:24:54.901436 203160 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:24:54.901515 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.901712 203160 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:24:54.901788 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m02
I0919 22:24:54.923297 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:54.924737 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32788 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m02/id_rsa Username:docker}
I0919 22:24:55.020889 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:24:55.117431 203160 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:24:55.117543 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:24:55.154058 203160 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:24:55.154092 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:55.154128 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:55.154249 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:55.171125 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:24:55.182699 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:24:55.193910 203160 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:24:55.193981 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:24:55.206930 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:55.218445 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:24:55.229676 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:24:55.239797 203160 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:24:55.249561 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:24:55.261388 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:24:55.272063 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:24:55.285133 203160 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:24:55.294764 203160 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:24:55.304309 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:55.385891 203160 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0919 22:24:55.483649 203160 start.go:495] detecting cgroup driver to use...
I0919 22:24:55.483704 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:24:55.483771 203160 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0919 22:24:55.498112 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:55.511999 203160 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0919 22:24:55.531010 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:24:55.547951 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:24:55.562055 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:24:55.582950 203160 ssh_runner.go:195] Run: which cri-dockerd
I0919 22:24:55.588111 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0919 22:24:55.600129 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0919 22:24:55.622263 203160 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0919 22:24:55.715078 203160 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0919 22:24:55.798019 203160 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0919 22:24:55.798075 203160 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0919 22:24:55.821473 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0919 22:24:55.835550 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:55.921379 203160 ssh_runner.go:195] Run: sudo systemctl restart docker
I0919 22:24:56.663040 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:24:56.676296 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0919 22:24:56.691640 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:56.705621 203160 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0919 22:24:56.790623 203160 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0919 22:24:56.868190 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:56.965154 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0919 22:24:56.986139 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0919 22:24:56.999297 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:57.084263 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0919 22:24:57.171144 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:24:57.185630 203160 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0919 22:24:57.185700 203160 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0919 22:24:57.190173 203160 start.go:563] Will wait 60s for crictl version
I0919 22:24:57.190233 203160 ssh_runner.go:195] Run: which crictl
I0919 22:24:57.194000 203160 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:24:57.238791 203160 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0919 22:24:57.238870 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:57.271275 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:24:57.304909 203160 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0919 22:24:57.306146 203160 out.go:179] - env NO_PROXY=192.168.49.2
I0919 22:24:57.307257 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:24:57.328319 203160 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:24:57.333877 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:57.348827 203160 mustload.go:65] Loading cluster: ha-434755
I0919 22:24:57.349095 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:24:57.349417 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:24:57.372031 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:57.372263 203160 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755 for IP: 192.168.49.3
I0919 22:24:57.372273 203160 certs.go:194] generating shared ca certs ...
I0919 22:24:57.372289 203160 certs.go:226] acquiring lock for ca certs: {Name:mkc5df652d6204fd8687dfaaf83b02c6e10b58b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:57.372399 203160 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key
I0919 22:24:57.372434 203160 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key
I0919 22:24:57.372443 203160 certs.go:256] generating profile certs ...
I0919 22:24:57.372523 203160 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key
I0919 22:24:57.372551 203160 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57
I0919 22:24:57.372569 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.254]
I0919 22:24:57.438372 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57 ...
I0919 22:24:57.438407 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57: {Name:mk30b073ffbf49812fc1c5fc78a448cc1824100f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:57.438643 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57 ...
I0919 22:24:57.438666 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57: {Name:mk59c79ca511caeebb332978950944f46d4ce354 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:24:57.438796 203160 certs.go:381] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.be912a57 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt
I0919 22:24:57.438979 203160 certs.go:385] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.be912a57 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key
I0919 22:24:57.439158 203160 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key
I0919 22:24:57.439184 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:24:57.439202 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:24:57.439220 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:24:57.439238 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:24:57.439256 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:24:57.439273 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:24:57.439294 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:24:57.439312 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:24:57.439376 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem (1338 bytes)
W0919 22:24:57.439458 203160 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335_empty.pem, impossibly tiny 0 bytes
I0919 22:24:57.439474 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem (1675 bytes)
I0919 22:24:57.439537 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem (1078 bytes)
I0919 22:24:57.439573 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem (1123 bytes)
I0919 22:24:57.439608 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem (1675 bytes)
I0919 22:24:57.439670 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:24:57.439716 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /usr/share/ca-certificates/1463352.pem
I0919 22:24:57.439743 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:57.439759 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem -> /usr/share/ca-certificates/146335.pem
I0919 22:24:57.439830 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:57.462047 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:57.557856 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0919 22:24:57.562525 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0919 22:24:57.578095 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0919 22:24:57.582466 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0919 22:24:57.599559 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0919 22:24:57.603627 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0919 22:24:57.618994 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0919 22:24:57.622912 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0919 22:24:57.638660 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0919 22:24:57.643248 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0919 22:24:57.660006 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0919 22:24:57.664313 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0919 22:24:57.680744 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:24:57.714036 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:24:57.747544 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:24:57.780943 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:24:57.812353 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0919 22:24:57.845693 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0919 22:24:57.878130 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:24:57.911308 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:24:57.946218 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /usr/share/ca-certificates/1463352.pem (1708 bytes)
I0919 22:24:57.984297 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:24:58.017177 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem --> /usr/share/ca-certificates/146335.pem (1338 bytes)
I0919 22:24:58.049420 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0919 22:24:58.073963 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0919 22:24:58.097887 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0919 22:24:58.122255 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0919 22:24:58.147967 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0919 22:24:58.171849 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0919 22:24:58.195690 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0919 22:24:58.219698 203160 ssh_runner.go:195] Run: openssl version
I0919 22:24:58.227264 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1463352.pem && ln -fs /usr/share/ca-certificates/1463352.pem /etc/ssl/certs/1463352.pem"
I0919 22:24:58.240247 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1463352.pem
I0919 22:24:58.244702 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/1463352.pem
I0919 22:24:58.244768 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1463352.pem
I0919 22:24:58.254189 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/1463352.pem /etc/ssl/certs/3ec20f2e.0"
I0919 22:24:58.265745 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:24:58.279180 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:58.284030 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:58.284084 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:24:58.292591 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:24:58.305819 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/146335.pem && ln -fs /usr/share/ca-certificates/146335.pem /etc/ssl/certs/146335.pem"
I0919 22:24:58.318945 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/146335.pem
I0919 22:24:58.323696 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/146335.pem
I0919 22:24:58.323742 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/146335.pem
I0919 22:24:58.333578 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/146335.pem /etc/ssl/certs/51391683.0"
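The three command groups above install each extra CA into the node's trust store: the PEM is linked under /usr/share/ca-certificates, its OpenSSL subject hash is computed, and a <hash>.0 symlink is added in /etc/ssl/certs so TLS libraries can find it. A minimal Go sketch of that step, shelling out to the same openssl/ln commands; installCACert is a hypothetical helper for illustration, not minikube's actual code, and writing into /etc/ssl/certs needs root:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// installCACert ensures an OpenSSL subject-hash symlink (<hash>.0) in
// /etc/ssl/certs points at certPath, mirroring the log's
// "openssl x509 -hash" + "test -L || ln -fs" sequence.
func installCACert(certPath string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return fmt.Errorf("hashing %s: %w", certPath, err)
	}
	link := "/etc/ssl/certs/" + strings.TrimSpace(string(out)) + ".0"
	if fi, err := os.Lstat(link); err == nil && fi.Mode()&os.ModeSymlink != 0 {
		return nil // equivalent of: test -L <link>
	}
	return exec.Command("ln", "-fs", certPath, link).Run() // ln -fs <cert> <hash>.0
}

func main() {
	if err := installCACert("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
		fmt.Println("install failed:", err)
	}
}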
I0919 22:24:58.346835 203160 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:24:58.351013 203160 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:24:58.351074 203160 kubeadm.go:926] updating node {m02 192.168.49.3 8443 v1.34.0 docker true true} ...
I0919 22:24:58.351194 203160 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-434755-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.3
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:24:58.351227 203160 kube-vip.go:115] generating kube-vip config ...
I0919 22:24:58.351267 203160 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:24:58.367957 203160 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:24:58.368034 203160 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
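The manifest above is later copied to /etc/kubernetes/manifests/kube-vip.yaml so the kubelet runs kube-vip as a static pod advertising the HA VIP. A hedged sketch of rendering such a manifest from a Go text/template; the cut-down template, field names and local output path here are illustrative only (the real manifest carries the full env list shown above):

package main

import (
	"os"
	"text/template"
)

// A cut-down kube-vip static-pod template; the real manifest above carries
// many more env vars (leader election, ARP, ports, etc.).
const kubeVipTmpl = `apiVersion: v1
kind: Pod
metadata:
  name: kube-vip
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-vip
    image: {{.Image}}
    args: ["manager"]
    env:
    - name: vip_interface
      value: {{.Interface}}
    - name: address
      value: {{.VIP}}
`

type vipConfig struct{ Image, Interface, VIP string }

func main() {
	t := template.Must(template.New("kube-vip").Parse(kubeVipTmpl))
	f, err := os.Create("kube-vip.yaml") // minikube scp's the rendered file into /etc/kubernetes/manifests
	if err != nil {
		panic(err)
	}
	defer f.Close()
	err = t.Execute(f, vipConfig{
		Image:     "ghcr.io/kube-vip/kube-vip:v1.0.0",
		Interface: "eth0",
		VIP:       "192.168.49.254",
	})
	if err != nil {
		panic(err)
	}
}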
I0919 22:24:58.368096 203160 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:24:58.379862 203160 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:24:58.379941 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0919 22:24:58.392276 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0919 22:24:58.417444 203160 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:24:58.442669 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0919 22:24:58.468697 203160 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:24:58.473305 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:24:58.487646 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:24:58.578606 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:24:58.608451 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:24:58.608749 203160 start.go:317] joinCluster: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[]
DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0
MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:24:58.608859 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0919 22:24:58.608912 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:24:58.632792 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:24:58.802805 203160 start.go:343] trying to join control-plane node "m02" to cluster: &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:24:58.802874 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token b4953v.b0t4y42p8a3t0277 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443"
I0919 22:25:17.080561 203160 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token b4953v.b0t4y42p8a3t0277 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m02 --control-plane --apiserver-advertise-address=192.168.49.3 --apiserver-bind-port=8443": (18.277615829s)
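The join executed above is the output of the primary node's "kubeadm token create --print-join-command" with minikube's extra control-plane flags appended. A small sketch of assembling that command line; every value passed in main below is a dummy placeholder, not this run's token or CA hash:

package main

import "fmt"

// joinCommand builds a kubeadm control-plane join invocation in the shape
// seen in the log above.
func joinCommand(endpoint, token, caHash, nodeName, advertiseIP string) string {
	return fmt.Sprintf("kubeadm join %s --token %s --discovery-token-ca-cert-hash %s "+
		"--ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock "+
		"--node-name=%s --control-plane --apiserver-advertise-address=%s --apiserver-bind-port=8443",
		endpoint, token, caHash, nodeName, advertiseIP)
}

func main() {
	fmt.Println(joinCommand("control-plane.minikube.internal:8443",
		"abcdef.0123456789abcdef", "sha256:<ca-cert-hash>", "example-m02", "192.168.49.3"))
}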
I0919 22:25:17.080625 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0919 22:25:17.341701 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-434755-m02 minikube.k8s.io/updated_at=2025_09_19T22_25_17_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-434755 minikube.k8s.io/primary=false
I0919 22:25:17.424260 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-434755-m02 node-role.kubernetes.io/control-plane:NoSchedule-
I0919 22:25:17.499697 203160 start.go:319] duration metric: took 18.890943143s to joinCluster
I0919 22:25:17.499790 203160 start.go:235] Will wait 6m0s for node &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:17.500059 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:17.501017 203160 out.go:179] * Verifying Kubernetes components...
I0919 22:25:17.502040 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:17.615768 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:25:17.630185 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0919 22:25:17.630259 203160 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0919 22:25:17.630522 203160 node_ready.go:35] waiting up to 6m0s for node "ha-434755-m02" to be "Ready" ...
I0919 22:25:17.639687 203160 node_ready.go:49] node "ha-434755-m02" is "Ready"
I0919 22:25:17.639715 203160 node_ready.go:38] duration metric: took 9.169272ms for node "ha-434755-m02" to be "Ready" ...
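node_ready.go above simply polls the Node object until its NodeReady condition is True. A hedged client-go sketch of the same check, assuming k8s.io/client-go is available; waitNodeReady and the kubeconfig path are illustrative, not minikube's internals:

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitNodeReady polls the named Node until its NodeReady condition is True
// or the timeout expires.
func waitNodeReady(client kubernetes.Interface, name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if err == nil {
			for _, c := range node.Status.Conditions {
				if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
					return nil
				}
			}
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("node %q not Ready within %s", name, timeout)
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	if err := waitNodeReady(client, "ha-434755-m02", 6*time.Minute); err != nil {
		panic(err)
	}
	fmt.Println("node is Ready")
}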
I0919 22:25:17.639733 203160 api_server.go:52] waiting for apiserver process to appear ...
I0919 22:25:17.639783 203160 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0919 22:25:17.654193 203160 api_server.go:72] duration metric: took 154.362028ms to wait for apiserver process to appear ...
I0919 22:25:17.654221 203160 api_server.go:88] waiting for apiserver healthz status ...
I0919 22:25:17.654246 203160 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0919 22:25:17.658704 203160 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0919 22:25:17.659870 203160 api_server.go:141] control plane version: v1.34.0
I0919 22:25:17.659894 203160 api_server.go:131] duration metric: took 5.665643ms to wait for apiserver health ...
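The health check above is a plain HTTPS GET against the apiserver's /healthz endpoint, retried until it answers 200 "ok". A minimal sketch of such a poll; it skips TLS verification purely to stay short, whereas the real client in this log authenticates with the profile's client certificate:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

// waitHealthz polls <base>/healthz until it returns HTTP 200 or the timeout
// passes. InsecureSkipVerify is for the sketch only.
func waitHealthz(base string, timeout time.Duration) error {
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := client.Get(base + "/healthz")
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Printf("healthz: %s\n", body)
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("apiserver not healthy after %s", timeout)
}

func main() {
	if err := waitHealthz("https://192.168.49.2:8443", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}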
I0919 22:25:17.659902 203160 system_pods.go:43] waiting for kube-system pods to appear ...
I0919 22:25:17.664793 203160 system_pods.go:59] 18 kube-system pods found
I0919 22:25:17.664839 203160 system_pods.go:61] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:17.664851 203160 system_pods.go:61] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:17.664856 203160 system_pods.go:61] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:17.664862 203160 system_pods.go:61] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:17.664875 203160 system_pods.go:61] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:17.664883 203160 system_pods.go:61] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:17.664891 203160 system_pods.go:61] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:17.664903 203160 system_pods.go:61] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:17.664909 203160 system_pods.go:61] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:17.664921 203160 system_pods.go:61] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:17.664931 203160 system_pods.go:61] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.664938 203160 system_pods.go:61] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:17.664946 203160 system_pods.go:61] "kube-proxy-tzxjp" [68f449c9-12dc-40e2-9d22-a0c067962cb9] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.664954 203160 system_pods.go:61] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:17.664962 203160 system_pods.go:61] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:17.664969 203160 system_pods.go:61] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:17.664975 203160 system_pods.go:61] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:17.664981 203160 system_pods.go:61] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:17.664991 203160 system_pods.go:74] duration metric: took 5.081378ms to wait for pod list to return data ...
I0919 22:25:17.665004 203160 default_sa.go:34] waiting for default service account to be created ...
I0919 22:25:17.668317 203160 default_sa.go:45] found service account: "default"
I0919 22:25:17.668340 203160 default_sa.go:55] duration metric: took 3.328321ms for default service account to be created ...
I0919 22:25:17.668351 203160 system_pods.go:116] waiting for k8s-apps to be running ...
I0919 22:25:17.673137 203160 system_pods.go:86] 18 kube-system pods found
I0919 22:25:17.673173 203160 system_pods.go:89] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:17.673190 203160 system_pods.go:89] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:17.673196 203160 system_pods.go:89] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:17.673202 203160 system_pods.go:89] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:17.673216 203160 system_pods.go:89] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:17.673225 203160 system_pods.go:89] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:17.673232 203160 system_pods.go:89] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:17.673239 203160 system_pods.go:89] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:17.673245 203160 system_pods.go:89] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:17.673253 203160 system_pods.go:89] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:17.673261 203160 system_pods.go:89] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.673269 203160 system_pods.go:89] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:17.673277 203160 system_pods.go:89] "kube-proxy-tzxjp" [68f449c9-12dc-40e2-9d22-a0c067962cb9] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:17.673285 203160 system_pods.go:89] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:17.673306 203160 system_pods.go:89] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:17.673316 203160 system_pods.go:89] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:17.673321 203160 system_pods.go:89] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:17.673325 203160 system_pods.go:89] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:17.673334 203160 system_pods.go:126] duration metric: took 4.976103ms to wait for k8s-apps to be running ...
I0919 22:25:17.673343 203160 system_svc.go:44] waiting for kubelet service to be running ....
I0919 22:25:17.673397 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0919 22:25:17.689275 203160 system_svc.go:56] duration metric: took 15.922768ms WaitForService to wait for kubelet
I0919 22:25:17.689301 203160 kubeadm.go:578] duration metric: took 189.477657ms to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:25:17.689322 203160 node_conditions.go:102] verifying NodePressure condition ...
I0919 22:25:17.693097 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:17.693135 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:17.693151 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:17.693156 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:17.693162 203160 node_conditions.go:105] duration metric: took 3.833677ms to run NodePressure ...
I0919 22:25:17.693179 203160 start.go:241] waiting for startup goroutines ...
I0919 22:25:17.693211 203160 start.go:255] writing updated cluster config ...
I0919 22:25:17.695103 203160 out.go:203]
I0919 22:25:17.698818 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:17.698972 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:25:17.700470 203160 out.go:179] * Starting "ha-434755-m03" control-plane node in "ha-434755" cluster
I0919 22:25:17.701508 203160 cache.go:123] Beginning downloading kic base image for docker with docker
I0919 22:25:17.702525 203160 out.go:179] * Pulling base image v0.0.48 ...
I0919 22:25:17.703600 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:25:17.703627 203160 cache.go:58] Caching tarball of preloaded images
I0919 22:25:17.703660 203160 image.go:81] Checking for gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon
I0919 22:25:17.703750 203160 preload.go:172] Found /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0919 22:25:17.703762 203160 cache.go:61] Finished verifying existence of preloaded tar for v1.34.0 on docker
I0919 22:25:17.703897 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:25:17.728614 203160 image.go:100] Found gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 in local docker daemon, skipping pull
I0919 22:25:17.728640 203160 cache.go:147] gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 exists in daemon, skipping load
I0919 22:25:17.728661 203160 cache.go:232] Successfully downloaded all kic artifacts
I0919 22:25:17.728696 203160 start.go:360] acquireMachinesLock for ha-434755-m03: {Name:mk4499ef8414fba131017fb3f66e00435d0a646b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0919 22:25:17.728819 203160 start.go:364] duration metric: took 98.455µs to acquireMachinesLock for "ha-434755-m03"
I0919 22:25:17.728853 203160 start.go:93] Provisioning new machine with config: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerN
ames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:fals
e kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetP
ath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name:m03 IP: Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:17.728991 203160 start.go:125] createHost starting for "m03" (driver="docker")
I0919 22:25:17.732545 203160 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0919 22:25:17.732672 203160 start.go:159] libmachine.API.Create for "ha-434755" (driver="docker")
I0919 22:25:17.732707 203160 client.go:168] LocalClient.Create starting
I0919 22:25:17.732782 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem
I0919 22:25:17.732823 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:25:17.732845 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:25:17.732912 203160 main.go:141] libmachine: Reading certificate data from /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem
I0919 22:25:17.732939 203160 main.go:141] libmachine: Decoding PEM data...
I0919 22:25:17.732958 203160 main.go:141] libmachine: Parsing certificate...
I0919 22:25:17.733232 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:25:17.751632 203160 network_create.go:77] Found existing network {name:ha-434755 subnet:0xc00219e2a0 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 192 168 49 1] mtu:1500}
I0919 22:25:17.751674 203160 kic.go:121] calculated static IP "192.168.49.4" for the "ha-434755-m03" container
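kic.go:121 derives the new node's address from the existing ha-434755 network: the gateway keeps .1 and each node takes the next host number, so m03 lands on 192.168.49.4. A hedged sketch of that arithmetic; the numbering rule is inferred from the addresses appearing in this log (.2, .3, .4), not taken from minikube's source:

package main

import (
	"fmt"
	"net"
)

// nodeIP returns the address of the n-th node (1-based) in a /24 network
// whose gateway holds .1, so node 1 -> .2, node 3 -> .4.
func nodeIP(gateway net.IP, n int) net.IP {
	ip := gateway.To4()
	return net.IPv4(ip[0], ip[1], ip[2], ip[3]+byte(n))
}

func main() {
	gw := net.ParseIP("192.168.49.1")
	fmt.Println(nodeIP(gw, 3)) // 192.168.49.4 for the m03 node
}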
I0919 22:25:17.751747 203160 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0919 22:25:17.770069 203160 cli_runner.go:164] Run: docker volume create ha-434755-m03 --label name.minikube.sigs.k8s.io=ha-434755-m03 --label created_by.minikube.sigs.k8s.io=true
I0919 22:25:17.789823 203160 oci.go:103] Successfully created a docker volume ha-434755-m03
I0919 22:25:17.789902 203160 cli_runner.go:164] Run: docker run --rm --name ha-434755-m03-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m03 --entrypoint /usr/bin/test -v ha-434755-m03:/var gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -d /var/lib
I0919 22:25:18.164388 203160 oci.go:107] Successfully prepared a docker volume ha-434755-m03
I0919 22:25:18.164435 203160 preload.go:131] Checking if preload exists for k8s version v1.34.0 and runtime docker
I0919 22:25:18.164462 203160 kic.go:194] Starting extracting preloaded images to volume ...
I0919 22:25:18.164543 203160 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir
I0919 22:25:21.103950 203160 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/21594-142711/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.0-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v ha-434755-m03:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 -I lz4 -xf /preloaded.tar -C /extractDir: (2.939357533s)
I0919 22:25:21.103986 203160 kic.go:203] duration metric: took 2.939518923s to extract preloaded images to volume ...
W0919 22:25:21.104096 203160 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
W0919 22:25:21.104151 203160 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
I0919 22:25:21.104202 203160 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0919 22:25:21.177154 203160 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname ha-434755-m03 --name ha-434755-m03 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=ha-434755-m03 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=ha-434755-m03 --network ha-434755 --ip 192.168.49.4 --volume ha-434755-m03:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1
I0919 22:25:21.498634 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Running}}
I0919 22:25:21.522257 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Status}}
I0919 22:25:21.545087 203160 cli_runner.go:164] Run: docker exec ha-434755-m03 stat /var/lib/dpkg/alternatives/iptables
I0919 22:25:21.601217 203160 oci.go:144] the created container "ha-434755-m03" has a running status.
I0919 22:25:21.601289 203160 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa...
I0919 22:25:21.834101 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0919 22:25:21.834162 203160 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0919 22:25:21.931924 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Status}}
I0919 22:25:21.958463 203160 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0919 22:25:21.958488 203160 kic_runner.go:114] Args: [docker exec --privileged ha-434755-m03 chown docker:docker /home/docker/.ssh/authorized_keys]
I0919 22:25:22.013210 203160 cli_runner.go:164] Run: docker container inspect ha-434755-m03 --format={{.State.Status}}
I0919 22:25:22.034113 203160 machine.go:93] provisionDockerMachine start ...
I0919 22:25:22.034216 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.055636 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:22.055967 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:22.055993 203160 main.go:141] libmachine: About to run SSH command:
hostname
I0919 22:25:22.197369 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m03
I0919 22:25:22.197398 203160 ubuntu.go:182] provisioning hostname "ha-434755-m03"
I0919 22:25:22.197459 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.216027 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:22.216285 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:22.216301 203160 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-434755-m03 && echo "ha-434755-m03" | sudo tee /etc/hostname
I0919 22:25:22.368448 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-434755-m03
I0919 22:25:22.368549 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.386972 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:22.387278 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:22.387304 203160 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-434755-m03' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-434755-m03/g' /etc/hosts;
else
echo '127.0.1.1 ha-434755-m03' | sudo tee -a /etc/hosts;
fi
fi
I0919 22:25:22.524292 203160 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0919 22:25:22.524331 203160 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/21594-142711/.minikube CaCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/21594-142711/.minikube}
I0919 22:25:22.524354 203160 ubuntu.go:190] setting up certificates
I0919 22:25:22.524368 203160 provision.go:84] configureAuth start
I0919 22:25:22.524434 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m03
I0919 22:25:22.541928 203160 provision.go:143] copyHostCerts
I0919 22:25:22.541971 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:25:22.542000 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem, removing ...
I0919 22:25:22.542009 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem
I0919 22:25:22.542076 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/cert.pem (1123 bytes)
I0919 22:25:22.542159 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:25:22.542180 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem, removing ...
I0919 22:25:22.542186 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem
I0919 22:25:22.542213 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/key.pem (1675 bytes)
I0919 22:25:22.542310 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:25:22.542334 203160 exec_runner.go:144] found /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem, removing ...
I0919 22:25:22.542337 203160 exec_runner.go:203] rm: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem
I0919 22:25:22.542362 203160 exec_runner.go:151] cp: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/21594-142711/.minikube/ca.pem (1078 bytes)
I0919 22:25:22.542414 203160 provision.go:117] generating server cert: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem org=jenkins.ha-434755-m03 san=[127.0.0.1 192.168.49.4 ha-434755-m03 localhost minikube]
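provision.go:117 issues the Docker daemon's server certificate, signed by the machine CA and carrying the SANs listed above (loopback, the node IP, the hostname, localhost, minikube). A condensed crypto/x509 sketch of building a certificate with those SANs; for brevity it self-signs instead of loading the ca.pem/ca-key.pem pair that the log's flow actually uses:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{Organization: []string{"jenkins.ha-434755-m03"}},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// SANs matching the "san=[...]" list in the log line above.
		DNSNames:    []string{"ha-434755-m03", "localhost", "minikube"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.49.4")},
	}
	// Self-signed for brevity; the real flow signs with the machine CA key.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	out, err := os.Create("server.pem")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if err := pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		panic(err)
	}
}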
I0919 22:25:22.877628 203160 provision.go:177] copyRemoteCerts
I0919 22:25:22.877694 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0919 22:25:22.877741 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:22.896937 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:22.995146 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0919 22:25:22.995210 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0919 22:25:23.022236 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0919 22:25:23.022316 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0919 22:25:23.047563 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem -> /etc/docker/server.pem
I0919 22:25:23.047631 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0919 22:25:23.072319 203160 provision.go:87] duration metric: took 547.932448ms to configureAuth
I0919 22:25:23.072353 203160 ubuntu.go:206] setting minikube options for container-runtime
I0919 22:25:23.072625 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:23.072688 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:23.090959 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:23.091171 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:23.091183 203160 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0919 22:25:23.228223 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0919 22:25:23.228253 203160 ubuntu.go:71] root file system type: overlay
I0919 22:25:23.228422 203160 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0919 22:25:23.228509 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:23.246883 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:23.247100 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:23.247170 203160 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment="NO_PROXY=192.168.49.2"
Environment="NO_PROXY=192.168.49.2,192.168.49.3"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0919 22:25:23.398060 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
Environment=NO_PROXY=192.168.49.2
Environment=NO_PROXY=192.168.49.2,192.168.49.3
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0919 22:25:23.398137 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:23.415663 203160 main.go:141] libmachine: Using SSH client type: native
I0919 22:25:23.415892 203160 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x840140] 0x842e40 <nil> [] 0s} 127.0.0.1 32793 <nil> <nil>}
I0919 22:25:23.415918 203160 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0919 22:25:24.567023 203160 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-09-03 20:55:49.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2025-09-19 22:25:23.396311399 +0000
@@ -9,23 +9,36 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+Environment=NO_PROXY=192.168.49.2
+Environment=NO_PROXY=192.168.49.2,192.168.49.3
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
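The long SSH command above is an idempotent unit update: render docker.service.new, diff it against the live unit, and only when they differ swap the file and daemon-reload/enable/restart docker (which is why the diff output appears here on first provision). The same write-compare-swap pattern in Go, sketched with a hypothetical helper name:

package main

import (
	"bytes"
	"fmt"
	"os"
)

// swapIfChanged writes newUnit to path only when the contents differ and
// reports whether the caller should follow up with daemon-reload + restart.
func swapIfChanged(path string, newUnit []byte) (changed bool, err error) {
	current, err := os.ReadFile(path)
	if err == nil && bytes.Equal(current, newUnit) {
		return false, nil // identical: skip the restart entirely
	}
	tmp := path + ".new"
	if err := os.WriteFile(tmp, newUnit, 0o644); err != nil {
		return false, err
	}
	return true, os.Rename(tmp, path)
}

func main() {
	changed, err := swapIfChanged("docker.service", []byte("[Unit]\nDescription=example\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println("restart needed:", changed)
}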
I0919 22:25:24.567060 203160 machine.go:96] duration metric: took 2.53292644s to provisionDockerMachine
I0919 22:25:24.567072 203160 client.go:171] duration metric: took 6.83435882s to LocalClient.Create
I0919 22:25:24.567092 203160 start.go:167] duration metric: took 6.834424553s to libmachine.API.Create "ha-434755"
I0919 22:25:24.567099 203160 start.go:293] postStartSetup for "ha-434755-m03" (driver="docker")
I0919 22:25:24.567108 203160 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0919 22:25:24.567161 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0919 22:25:24.567201 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.584782 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:24.683573 203160 ssh_runner.go:195] Run: cat /etc/os-release
I0919 22:25:24.686859 203160 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0919 22:25:24.686883 203160 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0919 22:25:24.686890 203160 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0919 22:25:24.686896 203160 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0919 22:25:24.686906 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/addons for local assets ...
I0919 22:25:24.686958 203160 filesync.go:126] Scanning /home/jenkins/minikube-integration/21594-142711/.minikube/files for local assets ...
I0919 22:25:24.687030 203160 filesync.go:149] local asset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> 1463352.pem in /etc/ssl/certs
I0919 22:25:24.687040 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /etc/ssl/certs/1463352.pem
I0919 22:25:24.687116 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0919 22:25:24.695639 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:25:24.721360 203160 start.go:296] duration metric: took 154.24817ms for postStartSetup
I0919 22:25:24.721702 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m03
I0919 22:25:24.739596 203160 profile.go:143] Saving config to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/config.json ...
I0919 22:25:24.739824 203160 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0919 22:25:24.739863 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.756921 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:24.848110 203160 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0919 22:25:24.852461 203160 start.go:128] duration metric: took 7.123445347s to createHost
I0919 22:25:24.852485 203160 start.go:83] releasing machines lock for "ha-434755-m03", held for 7.123651539s
I0919 22:25:24.852564 203160 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-434755-m03
I0919 22:25:24.871364 203160 out.go:179] * Found network options:
I0919 22:25:24.872460 203160 out.go:179] - NO_PROXY=192.168.49.2,192.168.49.3
W0919 22:25:24.873469 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:25:24.873491 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:25:24.873531 203160 proxy.go:120] fail to check proxy env: Error ip not in block
W0919 22:25:24.873550 203160 proxy.go:120] fail to check proxy env: Error ip not in block
I0919 22:25:24.873614 203160 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0919 22:25:24.873651 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.873674 203160 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0919 22:25:24.873726 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755-m03
I0919 22:25:24.891768 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:24.892067 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755-m03/id_rsa Username:docker}
I0919 22:25:25.055623 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0919 22:25:25.084377 203160 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0919 22:25:25.084463 203160 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0919 22:25:25.110916 203160 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0919 22:25:25.110954 203160 start.go:495] detecting cgroup driver to use...
I0919 22:25:25.110987 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:25:25.111095 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:25:25.128062 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0919 22:25:25.138541 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0919 22:25:25.147920 203160 containerd.go:146] configuring containerd to use "systemd" as cgroup driver...
I0919 22:25:25.147980 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0919 22:25:25.158084 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:25:25.167726 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0919 22:25:25.177468 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0919 22:25:25.187066 203160 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0919 22:25:25.196074 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0919 22:25:25.205874 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0919 22:25:25.215655 203160 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0919 22:25:25.225542 203160 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0919 22:25:25.233921 203160 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0919 22:25:25.241915 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:25.307691 203160 ssh_runner.go:195] Run: sudo systemctl restart containerd
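[editor's note] The run of sed commands above rewrites /etc/containerd/config.toml in place so containerd uses the systemd cgroup driver detected on the host, then reloads and restarts containerd. As a rough Go equivalent of just the SystemdCgroup substitution (an illustrative sketch, not minikube's own code; the file path and pattern are taken from the commands above):

// patch_containerd_cgroup.go -- illustrative only; mirrors one sed edit from the log.
package main

import (
	"fmt"
	"os"
	"regexp"
)

func main() {
	const path = "/etc/containerd/config.toml" // same file the sed commands above edit

	data, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}

	// Equivalent of: sed -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g'
	re := regexp.MustCompile(`(?m)^( *)SystemdCgroup = .*$`)
	patched := re.ReplaceAll(data, []byte("${1}SystemdCgroup = true"))

	if err := os.WriteFile(path, patched, 0o644); err != nil {
		panic(err)
	}
	fmt.Println("containerd set to the systemd cgroup driver; restart containerd to apply")
}
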
I0919 22:25:25.379485 203160 start.go:495] detecting cgroup driver to use...
I0919 22:25:25.379559 203160 detect.go:190] detected "systemd" cgroup driver on host os
I0919 22:25:25.379617 203160 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0919 22:25:25.392037 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:25:25.402672 203160 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0919 22:25:25.417255 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0919 22:25:25.428199 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0919 22:25:25.438890 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0919 22:25:25.454554 203160 ssh_runner.go:195] Run: which cri-dockerd
I0919 22:25:25.457748 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0919 22:25:25.467191 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0919 22:25:25.484961 203160 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0919 22:25:25.554190 203160 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0919 22:25:25.619726 203160 docker.go:575] configuring docker to use "systemd" as cgroup driver...
I0919 22:25:25.619771 203160 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0919 22:25:25.638490 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0919 22:25:25.649394 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:25.718759 203160 ssh_runner.go:195] Run: sudo systemctl restart docker
I0919 22:25:26.508414 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0919 22:25:26.521162 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0919 22:25:26.532748 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:25:26.543940 203160 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0919 22:25:26.612578 203160 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0919 22:25:26.675793 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:26.742908 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0919 22:25:26.767410 203160 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0919 22:25:26.778129 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:26.843785 203160 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0919 22:25:26.914025 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0919 22:25:26.926481 203160 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0919 22:25:26.926561 203160 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0919 22:25:26.930135 203160 start.go:563] Will wait 60s for crictl version
I0919 22:25:26.930190 203160 ssh_runner.go:195] Run: which crictl
I0919 22:25:26.933448 203160 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0919 22:25:26.970116 203160 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 28.4.0
RuntimeApiVersion: v1
I0919 22:25:26.970186 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:25:26.995443 203160 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0919 22:25:27.022587 203160 out.go:252] * Preparing Kubernetes v1.34.0 on Docker 28.4.0 ...
I0919 22:25:27.023535 203160 out.go:179] - env NO_PROXY=192.168.49.2
I0919 22:25:27.024458 203160 out.go:179] - env NO_PROXY=192.168.49.2,192.168.49.3
I0919 22:25:27.025398 203160 cli_runner.go:164] Run: docker network inspect ha-434755 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0919 22:25:27.041313 203160 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0919 22:25:27.045217 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:25:27.056734 203160 mustload.go:65] Loading cluster: ha-434755
I0919 22:25:27.056929 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:27.057119 203160 cli_runner.go:164] Run: docker container inspect ha-434755 --format={{.State.Status}}
I0919 22:25:27.073694 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:25:27.073923 203160 certs.go:68] Setting up /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755 for IP: 192.168.49.4
I0919 22:25:27.073935 203160 certs.go:194] generating shared ca certs ...
I0919 22:25:27.073947 203160 certs.go:226] acquiring lock for ca certs: {Name:mkc5df652d6204fd8687dfaaf83b02c6e10b58b2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:25:27.074070 203160 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key
I0919 22:25:27.074110 203160 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key
I0919 22:25:27.074119 203160 certs.go:256] generating profile certs ...
I0919 22:25:27.074189 203160 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key
I0919 22:25:27.074218 203160 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6
I0919 22:25:27.074232 203160 crypto.go:68] Generating cert /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.4 192.168.49.254]
I0919 22:25:27.130384 203160 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6 ...
I0919 22:25:27.130417 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6: {Name:mke05473b288d96ff0a35c82b85fde4c8e83b40c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:25:27.130606 203160 crypto.go:164] Writing key to /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6 ...
I0919 22:25:27.130621 203160 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6: {Name:mk192f98c5799773d19e5939501046d3123dfe7a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0919 22:25:27.130715 203160 certs.go:381] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt.fcdc46d6 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt
I0919 22:25:27.130866 203160 certs.go:385] copying /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key.fcdc46d6 -> /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key
I0919 22:25:27.131029 203160 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key
I0919 22:25:27.131044 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0919 22:25:27.131061 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0919 22:25:27.131075 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0919 22:25:27.131089 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0919 22:25:27.131102 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0919 22:25:27.131115 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0919 22:25:27.131128 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0919 22:25:27.131141 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0919 22:25:27.131198 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem (1338 bytes)
W0919 22:25:27.131239 203160 certs.go:480] ignoring /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335_empty.pem, impossibly tiny 0 bytes
I0919 22:25:27.131248 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca-key.pem (1675 bytes)
I0919 22:25:27.131275 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/ca.pem (1078 bytes)
I0919 22:25:27.131303 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/cert.pem (1123 bytes)
I0919 22:25:27.131331 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/key.pem (1675 bytes)
I0919 22:25:27.131380 203160 certs.go:484] found cert: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem (1708 bytes)
I0919 22:25:27.131411 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem -> /usr/share/ca-certificates/146335.pem
I0919 22:25:27.131428 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem -> /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.131442 203160 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.131523 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:25:27.159068 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:25:27.248746 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I0919 22:25:27.252715 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I0919 22:25:27.267211 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I0919 22:25:27.270851 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1675 bytes)
I0919 22:25:27.283028 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I0919 22:25:27.286477 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I0919 22:25:27.298415 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I0919 22:25:27.301783 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I0919 22:25:27.314834 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I0919 22:25:27.318008 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I0919 22:25:27.330473 203160 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I0919 22:25:27.333984 203160 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I0919 22:25:27.345794 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0919 22:25:27.369657 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0919 22:25:27.393116 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0919 22:25:27.416244 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0919 22:25:27.439315 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1444 bytes)
I0919 22:25:27.463476 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0919 22:25:27.486915 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0919 22:25:27.510165 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0919 22:25:27.534471 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/certs/146335.pem --> /usr/share/ca-certificates/146335.pem (1338 bytes)
I0919 22:25:27.560237 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/files/etc/ssl/certs/1463352.pem --> /usr/share/ca-certificates/1463352.pem (1708 bytes)
I0919 22:25:27.583106 203160 ssh_runner.go:362] scp /home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0919 22:25:27.606007 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I0919 22:25:27.623725 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1675 bytes)
I0919 22:25:27.641200 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I0919 22:25:27.658321 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I0919 22:25:27.675317 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I0919 22:25:27.692422 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I0919 22:25:27.709455 203160 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I0919 22:25:27.727392 203160 ssh_runner.go:195] Run: openssl version
I0919 22:25:27.732862 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0919 22:25:27.742299 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.745678 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 19 22:15 /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.745728 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0919 22:25:27.752398 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0919 22:25:27.761605 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/146335.pem && ln -fs /usr/share/ca-certificates/146335.pem /etc/ssl/certs/146335.pem"
I0919 22:25:27.771021 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/146335.pem
I0919 22:25:27.774382 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Sep 19 22:20 /usr/share/ca-certificates/146335.pem
I0919 22:25:27.774418 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/146335.pem
I0919 22:25:27.781109 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/146335.pem /etc/ssl/certs/51391683.0"
I0919 22:25:27.790814 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1463352.pem && ln -fs /usr/share/ca-certificates/1463352.pem /etc/ssl/certs/1463352.pem"
I0919 22:25:27.799904 203160 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.803130 203160 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Sep 19 22:20 /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.803179 203160 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1463352.pem
I0919 22:25:27.809808 203160 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/1463352.pem /etc/ssl/certs/3ec20f2e.0"
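[editor's note] Each of the three blocks above computes the OpenSSL subject hash of a CA file and links it into /etc/ssl/certs as "<hash>.0", which is how OpenSSL-based clients look up trusted CAs in that directory. A minimal sketch of the same two steps in Go (illustrative only; it shells out to the same `openssl x509 -hash -noout` invocation shown in the log and needs root to write /etc/ssl/certs):

// ca_hash_link.go -- illustrative sketch, not minikube's certs.go.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	caPath := "/usr/share/ca-certificates/minikubeCA.pem" // path taken from the log above

	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", caPath).Output()
	if err != nil {
		panic(err)
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941", as seen in the symlink above

	link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
	// ln -fs equivalent: drop any existing link, then create a fresh one.
	_ = os.Remove(link)
	if err := os.Symlink(caPath, link); err != nil {
		panic(err)
	}
	fmt.Println("linked", caPath, "->", link)
}
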
I0919 22:25:27.819246 203160 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0919 22:25:27.822627 203160 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0919 22:25:27.822680 203160 kubeadm.go:926] updating node {m03 192.168.49.4 8443 v1.34.0 docker true true} ...
I0919 22:25:27.822775 203160 kubeadm.go:938] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.34.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-434755-m03 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.4
[Install]
config:
{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0919 22:25:27.822800 203160 kube-vip.go:115] generating kube-vip config ...
I0919 22:25:27.822828 203160 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I0919 22:25:27.834857 203160 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
I0919 22:25:27.834926 203160 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v1.0.0
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
I0919 22:25:27.834980 203160 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.0
I0919 22:25:27.843463 203160 binaries.go:44] Found k8s binaries, skipping transfer
I0919 22:25:27.843532 203160 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I0919 22:25:27.852030 203160 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0919 22:25:27.869894 203160 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0919 22:25:27.888537 203160 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I0919 22:25:27.908135 203160 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I0919 22:25:27.911776 203160 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0919 22:25:27.923898 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:27.989986 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:25:28.015049 203160 host.go:66] Checking if "ha-434755" exists ...
I0919 22:25:28.015341 203160 start.go:317] joinCluster: &{Name:ha-434755 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.48@sha256:7171c97a51623558720f8e5878e4f4637da093e2f2ed589997bedc6c1549b2b1 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.0 ClusterName:ha-434755 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0919 22:25:28.015488 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm token create --print-join-command --ttl=0"
I0919 22:25:28.015561 203160 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-434755
I0919 22:25:28.036185 203160 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/jenkins/minikube-integration/21594-142711/.minikube/machines/ha-434755/id_rsa Username:docker}
I0919 22:25:28.179815 203160 start.go:343] trying to join control-plane node "m03" to cluster: &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:28.179865 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token ktda9v.620xzponyzx4q4u3 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443"
I0919 22:25:39.101433 203160 ssh_runner.go:235] Completed: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.34.0:$PATH" kubeadm join control-plane.minikube.internal:8443 --token ktda9v.620xzponyzx4q4u3 --discovery-token-ca-cert-hash sha256:6e34938835ca5de20dcd743043ff221a1493ef970b34561f39a513839570935a --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock --node-name=ha-434755-m03 --control-plane --apiserver-advertise-address=192.168.49.4 --apiserver-bind-port=8443": (10.921540133s)
I0919 22:25:39.101473 203160 ssh_runner.go:195] Run: /bin/bash -c "sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
I0919 22:25:39.324555 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes ha-434755-m03 minikube.k8s.io/updated_at=2025_09_19T22_25_39_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53 minikube.k8s.io/name=ha-434755 minikube.k8s.io/primary=false
I0919 22:25:39.399339 203160 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig taint nodes ha-434755-m03 node-role.kubernetes.io/control-plane:NoSchedule-
I0919 22:25:39.475025 203160 start.go:319] duration metric: took 11.459681606s to joinCluster
I0919 22:25:39.475121 203160 start.go:235] Will wait 6m0s for node &{Name:m03 IP:192.168.49.4 Port:8443 KubernetesVersion:v1.34.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0919 22:25:39.475445 203160 config.go:182] Loaded profile config "ha-434755": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.0
I0919 22:25:39.476384 203160 out.go:179] * Verifying Kubernetes components...
I0919 22:25:39.477465 203160 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0919 22:25:39.581053 203160 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0919 22:25:39.594584 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W0919 22:25:39.594654 203160 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I0919 22:25:39.594885 203160 node_ready.go:35] waiting up to 6m0s for node "ha-434755-m03" to be "Ready" ...
W0919 22:25:41.598871 203160 node_ready.go:57] node "ha-434755-m03" has "Ready":"False" status (will retry)
I0919 22:25:43.601543 203160 node_ready.go:49] node "ha-434755-m03" is "Ready"
I0919 22:25:43.601575 203160 node_ready.go:38] duration metric: took 4.006671921s for node "ha-434755-m03" to be "Ready" ...
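[editor's note] The wait above polls the node object until its Ready condition turns true (about 4 seconds here). A rough client-go sketch of that kind of poll, reusing the kubeconfig path and node name that appear in the log (this is an illustration, not minikube's node_ready.go):

// wait_node_ready.go -- illustrative sketch of a Ready-condition poll with client-go.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path taken from the log; any reachable kubeconfig works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	deadline := time.Now().Add(6 * time.Minute) // matches the 6m0s wait in the log
	for time.Now().Before(deadline) {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), "ha-434755-m03", metav1.GetOptions{})
		if err == nil {
			for _, c := range node.Status.Conditions {
				if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
					fmt.Println("node is Ready")
					return
				}
			}
		}
		time.Sleep(2 * time.Second) // the log shows roughly 2s between retries
	}
	fmt.Println("timed out waiting for node to become Ready")
}
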
I0919 22:25:43.601598 203160 api_server.go:52] waiting for apiserver process to appear ...
I0919 22:25:43.601660 203160 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0919 22:25:43.617376 203160 api_server.go:72] duration metric: took 4.142210029s to wait for apiserver process to appear ...
I0919 22:25:43.617405 203160 api_server.go:88] waiting for apiserver healthz status ...
I0919 22:25:43.617428 203160 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0919 22:25:43.622827 203160 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0919 22:25:43.624139 203160 api_server.go:141] control plane version: v1.34.0
I0919 22:25:43.624164 203160 api_server.go:131] duration metric: took 6.751487ms to wait for apiserver health ...
I0919 22:25:43.624175 203160 system_pods.go:43] waiting for kube-system pods to appear ...
I0919 22:25:43.631480 203160 system_pods.go:59] 25 kube-system pods found
I0919 22:25:43.631526 203160 system_pods.go:61] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:43.631534 203160 system_pods.go:61] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:43.631540 203160 system_pods.go:61] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:43.631545 203160 system_pods.go:61] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:43.631555 203160 system_pods.go:61] "etcd-ha-434755-m03" [6e3492c7-5026-460d-87b4-e3e52a2a36ab] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0919 22:25:43.631565 203160 system_pods.go:61] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Running
I0919 22:25:43.631584 203160 system_pods.go:61] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:43.631592 203160 system_pods.go:61] "kindnet-jrkrv" [61220abf-7b4e-440a-a5aa-788c5991cacc] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:43.631602 203160 system_pods.go:61] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:43.631607 203160 system_pods.go:61] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:43.631624 203160 system_pods.go:61] "kube-apiserver-ha-434755-m03" [acbc85b2-3446-4129-99c3-618e857912fb] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0919 22:25:43.631633 203160 system_pods.go:61] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:43.631639 203160 system_pods.go:61] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:43.631652 203160 system_pods.go:61] "kube-controller-manager-ha-434755-m03" [3eb7c63e-1489-403e-9409-e9c347fff4c0] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0919 22:25:43.631660 203160 system_pods.go:61] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Running
I0919 22:25:43.631668 203160 system_pods.go:61] "kube-proxy-dzrbh" [6a5d3a9f-e63f-43df-bd58-596dc274f097] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:43.631675 203160 system_pods.go:61] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:43.631683 203160 system_pods.go:61] "kube-proxy-vwrdt" [e3337cd7-84eb-4ddd-921f-1ef42899cc96] Failed / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:43.631692 203160 system_pods.go:61] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:43.631698 203160 system_pods.go:61] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running
I0919 22:25:43.631709 203160 system_pods.go:61] "kube-scheduler-ha-434755-m03" [65aaaab6-6371-4454-b404-7fe2f6c4e41a] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:43.631718 203160 system_pods.go:61] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:43.631724 203160 system_pods.go:61] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:43.631732 203160 system_pods.go:61] "kube-vip-ha-434755-m03" [58560a63-dc5d-41bc-9805-e904f49b2cad] Running
I0919 22:25:43.631737 203160 system_pods.go:61] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:43.631747 203160 system_pods.go:74] duration metric: took 7.564894ms to wait for pod list to return data ...
I0919 22:25:43.631760 203160 default_sa.go:34] waiting for default service account to be created ...
I0919 22:25:43.635188 203160 default_sa.go:45] found service account: "default"
I0919 22:25:43.635210 203160 default_sa.go:55] duration metric: took 3.443504ms for default service account to be created ...
I0919 22:25:43.635221 203160 system_pods.go:116] waiting for k8s-apps to be running ...
I0919 22:25:43.640825 203160 system_pods.go:86] 24 kube-system pods found
I0919 22:25:43.640849 203160 system_pods.go:89] "coredns-66bc5c9577-4lmln" [0f31e1cc-6bbb-4987-93c7-48e61288b609] Running
I0919 22:25:43.640854 203160 system_pods.go:89] "coredns-66bc5c9577-w8trg" [54431fee-554c-4c3c-9c81-d779981d36db] Running
I0919 22:25:43.640858 203160 system_pods.go:89] "etcd-ha-434755" [efa4db41-3739-45d6-ada5-d66dd5b82f46] Running
I0919 22:25:43.640861 203160 system_pods.go:89] "etcd-ha-434755-m02" [c47d7da8-6337-4062-a7d1-707ebc8f4df5] Running
I0919 22:25:43.640867 203160 system_pods.go:89] "etcd-ha-434755-m03" [6e3492c7-5026-460d-87b4-e3e52a2a36ab] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0919 22:25:43.640872 203160 system_pods.go:89] "kindnet-74q9s" [06bab6e9-ad22-4651-947e-723307c31d04] Running
I0919 22:25:43.640877 203160 system_pods.go:89] "kindnet-djvx4" [dd2c97ac-215c-4657-a3af-bf74603285af] Running
I0919 22:25:43.640883 203160 system_pods.go:89] "kindnet-jrkrv" [61220abf-7b4e-440a-a5aa-788c5991cacc] Pending / Ready:ContainersNotReady (containers with unready status: [kindnet-cni]) / ContainersReady:ContainersNotReady (containers with unready status: [kindnet-cni])
I0919 22:25:43.640889 203160 system_pods.go:89] "kube-apiserver-ha-434755" [fdcd2f64-6b9f-40ed-be07-24beef072bca] Running
I0919 22:25:43.640893 203160 system_pods.go:89] "kube-apiserver-ha-434755-m02" [bcc4bd8e-7086-4034-94f8-865e02212e7b] Running
I0919 22:25:43.640901 203160 system_pods.go:89] "kube-apiserver-ha-434755-m03" [acbc85b2-3446-4129-99c3-618e857912fb] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0919 22:25:43.640907 203160 system_pods.go:89] "kube-controller-manager-ha-434755" [66066c78-f094-492d-9c71-a683cccd45a0] Running
I0919 22:25:43.640913 203160 system_pods.go:89] "kube-controller-manager-ha-434755-m02" [290b348b-6c1a-4891-990b-c943066ab212] Running
I0919 22:25:43.640922 203160 system_pods.go:89] "kube-controller-manager-ha-434755-m03" [3eb7c63e-1489-403e-9409-e9c347fff4c0] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0919 22:25:43.640927 203160 system_pods.go:89] "kube-proxy-4cnsm" [a477a521-e24b-449d-854f-c873cb517164] Running
I0919 22:25:43.640932 203160 system_pods.go:89] "kube-proxy-dzrbh" [6a5d3a9f-e63f-43df-bd58-596dc274f097] Pending / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0919 22:25:43.640937 203160 system_pods.go:89] "kube-proxy-gzpg8" [9d9843d9-c2ca-4751-8af5-f8fc91cf07c9] Running
I0919 22:25:43.640941 203160 system_pods.go:89] "kube-scheduler-ha-434755" [593d9f5b-40f3-47b7-aef2-b25348983754] Running
I0919 22:25:43.640944 203160 system_pods.go:89] "kube-scheduler-ha-434755-m02" [34109527-5e07-415c-9bfc-d500d75092ca] Running
I0919 22:25:43.640952 203160 system_pods.go:89] "kube-scheduler-ha-434755-m03" [65aaaab6-6371-4454-b404-7fe2f6c4e41a] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0919 22:25:43.640958 203160 system_pods.go:89] "kube-vip-ha-434755" [eb65f5df-597d-4d36-b4c4-e33b1c1a6b35] Running
I0919 22:25:43.640966 203160 system_pods.go:89] "kube-vip-ha-434755-m02" [30071515-3665-4872-a66b-3d8ddccb0cae] Running
I0919 22:25:43.640971 203160 system_pods.go:89] "kube-vip-ha-434755-m03" [58560a63-dc5d-41bc-9805-e904f49b2cad] Running
I0919 22:25:43.640974 203160 system_pods.go:89] "storage-provisioner" [fb950ab4-a515-4298-b7f0-e01d6290af75] Running
I0919 22:25:43.640981 203160 system_pods.go:126] duration metric: took 5.753999ms to wait for k8s-apps to be running ...
I0919 22:25:43.640989 203160 system_svc.go:44] waiting for kubelet service to be running ....
I0919 22:25:43.641031 203160 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0919 22:25:43.653532 203160 system_svc.go:56] duration metric: took 12.534189ms WaitForService to wait for kubelet
I0919 22:25:43.653556 203160 kubeadm.go:578] duration metric: took 4.178399256s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0919 22:25:43.653573 203160 node_conditions.go:102] verifying NodePressure condition ...
I0919 22:25:43.656435 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:43.656455 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:43.656467 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:43.656470 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:43.656475 203160 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0919 22:25:43.656479 203160 node_conditions.go:123] node cpu capacity is 8
I0919 22:25:43.656484 203160 node_conditions.go:105] duration metric: took 2.906956ms to run NodePressure ...
I0919 22:25:43.656557 203160 start.go:241] waiting for startup goroutines ...
I0919 22:25:43.656587 203160 start.go:255] writing updated cluster config ...
I0919 22:25:43.656893 203160 ssh_runner.go:195] Run: rm -f paused
I0919 22:25:43.660610 203160 pod_ready.go:37] extra waiting up to 4m0s for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0919 22:25:43.661067 203160 kapi.go:59] client config for ha-434755: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.crt", KeyFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/profiles/ha-434755/client.key", CAFile:"/home/jenkins/minikube-integration/21594-142711/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x27f4a00), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), WarningHandlerWithContext:rest.WarningHandlerWithContext(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I0919 22:25:43.664242 203160 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-4lmln" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.669047 203160 pod_ready.go:94] pod "coredns-66bc5c9577-4lmln" is "Ready"
I0919 22:25:43.669069 203160 pod_ready.go:86] duration metric: took 4.804098ms for pod "coredns-66bc5c9577-4lmln" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.669076 203160 pod_ready.go:83] waiting for pod "coredns-66bc5c9577-w8trg" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.673294 203160 pod_ready.go:94] pod "coredns-66bc5c9577-w8trg" is "Ready"
I0919 22:25:43.673313 203160 pod_ready.go:86] duration metric: took 4.232517ms for pod "coredns-66bc5c9577-w8trg" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.676291 203160 pod_ready.go:83] waiting for pod "etcd-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.681202 203160 pod_ready.go:94] pod "etcd-ha-434755" is "Ready"
I0919 22:25:43.681224 203160 pod_ready.go:86] duration metric: took 4.891614ms for pod "etcd-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.681231 203160 pod_ready.go:83] waiting for pod "etcd-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.685174 203160 pod_ready.go:94] pod "etcd-ha-434755-m02" is "Ready"
I0919 22:25:43.685197 203160 pod_ready.go:86] duration metric: took 3.961188ms for pod "etcd-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.685203 203160 pod_ready.go:83] waiting for pod "etcd-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:43.861561 203160 request.go:683] "Waited before sending request" delay="176.248264ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-434755-m03"
I0919 22:25:44.062212 203160 request.go:683] "Waited before sending request" delay="197.34334ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:44.261544 203160 request.go:683] "Waited before sending request" delay="75.158894ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/etcd-ha-434755-m03"
I0919 22:25:44.461584 203160 request.go:683] "Waited before sending request" delay="196.309622ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:44.861909 203160 request.go:683] "Waited before sending request" delay="172.267033ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:45.261844 203160 request.go:683] "Waited before sending request" delay="72.222149ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
W0919 22:25:45.690633 203160 pod_ready.go:104] pod "etcd-ha-434755-m03" is not "Ready", error: <nil>
I0919 22:25:46.192067 203160 pod_ready.go:94] pod "etcd-ha-434755-m03" is "Ready"
I0919 22:25:46.192098 203160 pod_ready.go:86] duration metric: took 2.50688828s for pod "etcd-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
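[editor's note] The repeated request.go "Waited before sending request ... client-side throttling, not priority and fairness" messages come from client-go's default rate limiter, not from the API server. With QPS and Burst left at zero in the rest.Config dumped above, client-go falls back to its defaults (roughly 5 requests/s with a burst of 10), which is enough to introduce the ~100-200ms waits seen here when many pod and node GETs are issued back to back. A minimal sketch of how those knobs are raised on a client (illustrative values, not minikube's configuration):

// client_qps.go -- illustrative sketch of tuning client-go's client-side rate limiter.
package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/minikube/kubeconfig")
	if err != nil {
		panic(err)
	}
	// QPS=0 / Burst=0 (as in the rest.Config above) means client-go uses its defaults,
	// which produce the throttling waits in this log. Raising them reduces those delays.
	cfg.QPS = 50
	cfg.Burst = 100

	client := kubernetes.NewForConfigOrDie(cfg)
	_ = client // issue requests as usual; the limiter is applied per client
}
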
I0919 22:25:46.262400 203160 request.go:683] "Waited before sending request" delay="70.17118ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=component%3Dkube-apiserver"
I0919 22:25:46.266643 203160 pod_ready.go:83] waiting for pod "kube-apiserver-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.462133 203160 request.go:683] "Waited before sending request" delay="195.353683ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755"
I0919 22:25:46.661695 203160 request.go:683] "Waited before sending request" delay="196.23519ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755"
I0919 22:25:46.664990 203160 pod_ready.go:94] pod "kube-apiserver-ha-434755" is "Ready"
I0919 22:25:46.665013 203160 pod_ready.go:86] duration metric: took 398.342895ms for pod "kube-apiserver-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.665024 203160 pod_ready.go:83] waiting for pod "kube-apiserver-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:46.862485 203160 request.go:683] "Waited before sending request" delay="197.349925ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755-m02"
I0919 22:25:47.062458 203160 request.go:683] "Waited before sending request" delay="196.27598ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m02"
I0919 22:25:47.066027 203160 pod_ready.go:94] pod "kube-apiserver-ha-434755-m02" is "Ready"
I0919 22:25:47.066062 203160 pod_ready.go:86] duration metric: took 401.030788ms for pod "kube-apiserver-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:47.066074 203160 pod_ready.go:83] waiting for pod "kube-apiserver-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:47.262536 203160 request.go:683] "Waited before sending request" delay="196.349445ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755-m03"
I0919 22:25:47.461658 203160 request.go:683] "Waited before sending request" delay="196.15827ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:47.662339 203160 request.go:683] "Waited before sending request" delay="95.242557ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-434755-m03"
I0919 22:25:47.861611 203160 request.go:683] "Waited before sending request" delay="196.286818ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:48.262313 203160 request.go:683] "Waited before sending request" delay="192.342763ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:48.661859 203160 request.go:683] "Waited before sending request" delay="92.219172ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
W0919 22:25:49.071933 203160 pod_ready.go:104] pod "kube-apiserver-ha-434755-m03" is not "Ready", error: <nil>
I0919 22:25:51.071739 203160 pod_ready.go:94] pod "kube-apiserver-ha-434755-m03" is "Ready"
I0919 22:25:51.071767 203160 pod_ready.go:86] duration metric: took 4.005686408s for pod "kube-apiserver-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.074543 203160 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.262152 203160 request.go:683] "Waited before sending request" delay="185.334685ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755"
I0919 22:25:51.265630 203160 pod_ready.go:94] pod "kube-controller-manager-ha-434755" is "Ready"
I0919 22:25:51.265657 203160 pod_ready.go:86] duration metric: took 191.092666ms for pod "kube-controller-manager-ha-434755" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.265666 203160 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.462098 203160 request.go:683] "Waited before sending request" delay="196.345826ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-434755-m02"
I0919 22:25:51.661912 203160 request.go:683] "Waited before sending request" delay="196.187823ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m02"
I0919 22:25:51.665191 203160 pod_ready.go:94] pod "kube-controller-manager-ha-434755-m02" is "Ready"
I0919 22:25:51.665224 203160 pod_ready.go:86] duration metric: took 399.551288ms for pod "kube-controller-manager-ha-434755-m02" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.665233 203160 pod_ready.go:83] waiting for pod "kube-controller-manager-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:51.861619 203160 request.go:683] "Waited before sending request" delay="196.276968ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-434755-m03"
I0919 22:25:52.062202 203160 request.go:683] "Waited before sending request" delay="197.351779ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:52.065578 203160 pod_ready.go:94] pod "kube-controller-manager-ha-434755-m03" is "Ready"
I0919 22:25:52.065604 203160 pod_ready.go:86] duration metric: took 400.365679ms for pod "kube-controller-manager-ha-434755-m03" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.262003 203160 request.go:683] "Waited before sending request" delay="196.29708ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods?labelSelector=k8s-app%3Dkube-proxy"
I0919 22:25:52.265548 203160 pod_ready.go:83] waiting for pod "kube-proxy-4cnsm" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.462021 203160 request.go:683] "Waited before sending request" delay="196.352536ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-4cnsm"
I0919 22:25:52.662519 203160 request.go:683] "Waited before sending request" delay="196.351016ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m02"
I0919 22:25:52.665831 203160 pod_ready.go:94] pod "kube-proxy-4cnsm" is "Ready"
I0919 22:25:52.665859 203160 pod_ready.go:86] duration metric: took 400.28275ms for pod "kube-proxy-4cnsm" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.665868 203160 pod_ready.go:83] waiting for pod "kube-proxy-dzrbh" in "kube-system" namespace to be "Ready" or be gone ...
I0919 22:25:52.862291 203160 request.go:683] "Waited before sending request" delay="196.344667ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-dzrbh"
I0919 22:25:53.061976 203160 request.go:683] "Waited before sending request" delay="196.35101ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:53.261911 203160 request.go:683] "Waited before sending request" delay="95.241357ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/namespaces/kube-system/pods/kube-proxy-dzrbh"
I0919 22:25:53.461590 203160 request.go:683] "Waited before sending request" delay="196.28491ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:53.862244 203160 request.go:683] "Waited before sending request" delay="192.346086ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
I0919 22:25:54.261842 203160 request.go:683] "Waited before sending request" delay="92.230453ms" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://192.168.49.254:8443/api/v1/nodes/ha-434755-m03"
W0919 22:25:54.671717 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:25:56.671839 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:25:58.672473 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:01.172572 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:03.672671 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:06.172469 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:08.672353 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:11.172405 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:13.672314 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:16.172799 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:18.672196 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:20.672298 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:23.171528 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:25.172008 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:27.172570 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:29.672449 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:31.672563 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:33.672868 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:36.170989 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:38.171892 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:40.172022 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:42.172174 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:44.671993 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:47.171063 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:49.172486 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:51.672732 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:54.172023 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:56.172144 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:26:58.671775 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:00.671992 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:03.171993 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:05.671723 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:08.171842 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:10.172121 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:12.672014 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:15.172390 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:17.172822 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:19.672126 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:21.673333 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:24.171769 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:26.672310 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:29.171411 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:31.171872 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:33.172386 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:35.172451 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:37.672546 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:40.172235 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:42.172963 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:44.671777 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:46.671841 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:49.171918 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:51.172295 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:53.671812 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:55.672948 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:27:58.171734 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:00.172103 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:02.174861 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:04.672033 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:07.171816 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:09.671792 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:11.672609 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:14.171130 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:16.172329 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:18.672102 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:21.172674 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:23.173027 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:25.672026 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:28.171975 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:30.672302 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:32.672601 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:35.171532 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:37.171862 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:39.672084 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:42.172811 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:44.672206 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:46.672508 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:49.171457 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:51.172154 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:53.172276 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:55.672125 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:28:58.173041 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:00.672216 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:03.172384 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:05.673458 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:08.172666 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:10.672118 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:13.171914 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:15.172099 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:17.671977 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:20.172061 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:22.671971 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:24.672271 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:27.171769 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:29.172036 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:31.172563 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:33.672797 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:36.171859 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:38.671554 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:41.171621 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
W0919 22:29:43.172570 203160 pod_ready.go:104] pod "kube-proxy-dzrbh" is not "Ready", error: <nil>
I0919 22:29:43.661688 203160 pod_ready.go:86] duration metric: took 3m50.995803943s for pod "kube-proxy-dzrbh" in "kube-system" namespace to be "Ready" or be gone ...
W0919 22:29:43.661752 203160 pod_ready.go:65] not all pods in "kube-system" namespace with "k8s-app=kube-proxy" label are "Ready", will retry: waitPodCondition: context deadline exceeded
I0919 22:29:43.661771 203160 pod_ready.go:40] duration metric: took 4m0.001130626s for extra waiting for all "kube-system" pods having one of [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] labels to be "Ready" ...
I0919 22:29:43.663339 203160 out.go:203]
W0919 22:29:43.664381 203160 out.go:285] X Exiting due to GUEST_START: extra waiting: WaitExtra: context deadline exceeded
I0919 22:29:43.665560 203160 out.go:203]
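The wait loop above (pod_ready.go) polls each control-plane pod's Ready condition on a fixed cadence until a 4-minute deadline, and gives up with "context deadline exceeded" because kube-proxy-dzrbh on ha-434755-m03 never reports Ready. A minimal client-go sketch of that polling pattern follows; it is an illustration only, with an assumed function name (waitPodReady) and kubeconfig handling, not minikube's actual pod_ready.go implementation.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitPodReady polls until the named pod is Ready or gone, or the timeout
// expires -- the same "Ready" or "be gone" condition the log lines above wait on.
func waitPodReady(ctx context.Context, cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true,
		func(ctx context.Context) (bool, error) {
			pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return true, nil // pod is gone, which also satisfies the wait
			}
			if err != nil {
				return false, nil // transient API error: keep polling
			}
			for _, cond := range pod.Status.Conditions {
				if cond.Type == corev1.PodReady {
					return cond.Status == corev1.ConditionTrue, nil
				}
			}
			return false, nil
		})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// kube-proxy-dzrbh is the pod that never became Ready in the run above;
	// with a 4-minute budget this returns a context-deadline error, matching
	// the GUEST_START failure reported by minikube.
	if err := waitPodReady(context.Background(), cs, "kube-system", "kube-proxy-dzrbh", 4*time.Minute); err != nil {
		fmt.Println("wait failed:", err)
	}
}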
==> Docker <==
Sep 19 22:24:49 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:24:49Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-4lmln_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:24:49 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:24:49Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/41bb0b28153e190e783092cfcd3e860459231dd55e7746d59828a10d315188f9/resolv.conf as [nameserver 192.168.49.1 search local europe-west4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Sep 19 22:24:49 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:24:49Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-4lmln_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:24:49 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:24:49Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-w8trg_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:24:53 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:24:53Z" level=info msg="Stop pulling image docker.io/kindest/kindnetd:v20250512-df8de77b: Status: Downloaded newer image for kindest/kindnetd:v20250512-df8de77b"
Sep 19 22:24:54 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:24:54Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Sep 19 22:25:02 ha-434755 dockerd[1124]: time="2025-09-19T22:25:02.225956908Z" level=info msg="ignoring event" container=f7365ae03012282e042fcdbb9d87e94b89928381e3b6f701b58d0e425f83b14a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:02 ha-434755 dockerd[1124]: time="2025-09-19T22:25:02.226083882Z" level=info msg="ignoring event" container=fd0a3ab5f285697717d070472745c94ac46d7e376804e2b2690d8192c539ce06 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:02 ha-434755 dockerd[1124]: time="2025-09-19T22:25:02.287898199Z" level=info msg="ignoring event" container=b987cc756018033717c69e468416998c2b07c3a7a6aab5e56b199bbd88fb51fe module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:02 ha-434755 dockerd[1124]: time="2025-09-19T22:25:02.287938972Z" level=info msg="ignoring event" container=de54ed5bb258a7d8937149fcb9be16e03e34cd6b8786d874a980e9f9ec26d429 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:02 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:02Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/89b975ea350c8ada63866afcc9dfe8d144799fa6442ff30b95e39235ca314606/resolv.conf as [nameserver 192.168.49.1 search local europe-west4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Sep 19 22:25:02 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:02Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/bc57496cf8c97a97999359a9838b6036be50e94cb061c0b1a8b8d03c6c47882f/resolv.conf as [nameserver 192.168.49.1 search local europe-west4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:0 edns0 trust-ad]"
Sep 19 22:25:02 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:02Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-w8trg_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:25:02 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:02Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-4lmln_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:25:02 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:02Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-4lmln_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:25:02 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:02Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-w8trg_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:25:03 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:03Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-w8trg_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:25:03 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:03Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-4lmln_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 19 22:25:15 ha-434755 dockerd[1124]: time="2025-09-19T22:25:15.634903380Z" level=info msg="ignoring event" container=e66b377f63cd024c271469a44f4844c50e6d21b7cd4f5be0240558825f482966 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:15 ha-434755 dockerd[1124]: time="2025-09-19T22:25:15.634965117Z" level=info msg="ignoring event" container=e797401c93bc72db5f536dfa81292a1cbcf7a082f6aa091231b53030ca4c3fe8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:15 ha-434755 dockerd[1124]: time="2025-09-19T22:25:15.702221010Z" level=info msg="ignoring event" container=89b975ea350c8ada63866afcc9dfe8d144799fa6442ff30b95e39235ca314606 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:15 ha-434755 dockerd[1124]: time="2025-09-19T22:25:15.702289485Z" level=info msg="ignoring event" container=bc57496cf8c97a97999359a9838b6036be50e94cb061c0b1a8b8d03c6c47882f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 19 22:25:15 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:15Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/62cd9dd3b99a779d6b1ffe72046bafeef3d781c016335de5886ea2ca70bf69d4/resolv.conf as [nameserver 192.168.49.1 search local europe-west4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options edns0 trust-ad ndots:0]"
Sep 19 22:25:15 ha-434755 cri-dockerd[1430]: time="2025-09-19T22:25:15Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b69dcaba1fe3e6996e4b1abe588d8ed828c8e1b07e61838a54d5c6eea3a368de/resolv.conf as [nameserver 192.168.49.1 search local europe-west4-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options trust-ad ndots:0 edns0]"
Sep 19 22:25:17 ha-434755 dockerd[1124]: time="2025-09-19T22:25:17.979227230Z" level=info msg="ignoring event" container=7dcf79d61a67e78a7e98abac24d2bff68653f6f436028d21debd03806fd167ff module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
37e3f52bd7982 6e38f40d628db 4 minutes ago Running storage-provisioner 1 af5b94805e3a7 storage-provisioner
276fb29221693 52546a367cc9e 4 minutes ago Running coredns 2 b69dcaba1fe3e coredns-66bc5c9577-w8trg
88736f55e64e2 52546a367cc9e 4 minutes ago Running coredns 2 62cd9dd3b99a7 coredns-66bc5c9577-4lmln
e797401c93bc7 52546a367cc9e 4 minutes ago Exited coredns 1 bc57496cf8c97 coredns-66bc5c9577-4lmln
e66b377f63cd0 52546a367cc9e 4 minutes ago Exited coredns 1 89b975ea350c8 coredns-66bc5c9577-w8trg
acbbcaa7a50ef kindest/kindnetd@sha256:07a4b3fe0077a0ae606cc0a200fc25a28fa64dcc30b8d311b461089969449f9a 4 minutes ago Running kindnet-cni 0 41bb0b28153e1 kindnet-djvx4
c4058cbf0779f df0860106674d 4 minutes ago Running kube-proxy 0 0bfeca1ad0bad kube-proxy-gzpg8
7dcf79d61a67e 6e38f40d628db 4 minutes ago Exited storage-provisioner 0 af5b94805e3a7 storage-provisioner
0fc6714ebb308 ghcr.io/kube-vip/kube-vip@sha256:4f256554a83a6d824ea9c5307450a2c3fd132e09c52b339326f94fefaf67155c 5 minutes ago Running kube-vip 0 fb11db0e55f38 kube-vip-ha-434755
baeef3d333816 90550c43ad2bc 5 minutes ago Running kube-apiserver 0 ba9ef91c2ce68 kube-apiserver-ha-434755
f040530b17342 5f1f5298c888d 5 minutes ago Running etcd 0 aae975e95bddb etcd-ha-434755
3b75df9b742b1 46169d968e920 5 minutes ago Running kube-scheduler 0 1e4f3e71f1dc3 kube-scheduler-ha-434755
9d7035076f5b1 a0af72f2ec6d6 5 minutes ago Running kube-controller-manager 0 88eef40585d59 kube-controller-manager-ha-434755
==> coredns [276fb2922169] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:37194 - 28984 "HINFO IN 5214134008379897248.7815776382534054762. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.027124502s
==> coredns [88736f55e64e] <==
maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] 127.0.0.1:58640 - 48004 "HINFO IN 2245373388099208717.3878449857039646311. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.027376041s
==> coredns [e66b377f63cd] <==
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] 127.0.0.1:40758 - 42383 "HINFO IN 7596401662938690273.2510453177671440305. udp 57 false 512" - - 0 5.000156982s
[ERROR] plugin/errors: 2 7596401662938690273.2510453177671440305. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
[INFO] 127.0.0.1:56884 - 59881 "HINFO IN 7596401662938690273.2510453177671440305. udp 57 false 512" - - 0 5.000107168s
[ERROR] plugin/errors: 2 7596401662938690273.2510453177671440305. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
==> coredns [e797401c93bc] <==
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: network is unreachable
[ERROR] plugin/kubernetes: Unhandled Error
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.12.1
linux/amd64, go1.24.1, 707c7c1
[INFO] plugin/health: Going into lameduck mode for 5s
[INFO] 127.0.0.1:43652 - 47211 "HINFO IN 2104433587108610861.5063388797386552334. udp 57 false 512" - - 0 5.000171362s
[ERROR] plugin/errors: 2 2104433587108610861.5063388797386552334. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
[INFO] 127.0.0.1:44505 - 54581 "HINFO IN 2104433587108610861.5063388797386552334. udp 57 false 512" - - 0 5.000102051s
[ERROR] plugin/errors: 2 2104433587108610861.5063388797386552334. HINFO: dial udp 192.168.49.1:53: connect: network is unreachable
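The CoreDNS blocks above reference the reload, health (5s lameduck), kubernetes, and forward plugins, but the Corefile itself is not part of the log. For orientation, the stock kubeadm/minikube default looks roughly like the following; the actual ConfigMap in this cluster is not shown, so treat this as an assumed default rather than the exact configuration:

.:53 {
    errors
    health {
       lameduck 5s
    }
    ready
    kubernetes cluster.local in-addr.arpa ip6.arpa {
       pods insecure
       fallthrough in-addr.arpa ip6.arpa
       ttl 30
    }
    prometheus :9153
    forward . /etc/resolv.conf {
       max_concurrent 1000
    }
    cache 30
    loop
    reload
    loadbalance
}

Under a config like this, the "dial udp 192.168.49.1:53: connect: network is unreachable" errors are the forward plugin trying the node's upstream resolver while the pod's network was still unreachable, and "Going into lameduck mode for 5s" is the health plugin's shutdown grace period.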
==> describe nodes <==
Name: ha-434755
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-434755
kubernetes.io/os=linux
minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53
minikube.k8s.io/name=ha-434755
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2025_09_19T22_24_45_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 19 Sep 2025 22:24:39 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-434755
AcquireTime: <unset>
RenewTime: Fri, 19 Sep 2025 22:29:40 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 19 Sep 2025 22:28:28 +0000 Fri, 19 Sep 2025 22:24:38 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 19 Sep 2025 22:28:28 +0000 Fri, 19 Sep 2025 22:24:38 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 19 Sep 2025 22:28:28 +0000 Fri, 19 Sep 2025 22:24:38 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 19 Sep 2025 22:28:28 +0000 Fri, 19 Sep 2025 22:24:40 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: ha-434755
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
System Info:
Machine ID: 7b1fb77ef5024d9e96bd6c3ede9949e2
System UUID: 777ab209-7204-4aa7-96a4-31869ecf7396
Boot ID: f409d6b2-5b2d-482a-a418-1c1a417dfa0a
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (10 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system coredns-66bc5c9577-4lmln 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 4m57s
kube-system coredns-66bc5c9577-w8trg 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 4m57s
kube-system etcd-ha-434755 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 5m
kube-system kindnet-djvx4 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 4m57s
kube-system kube-apiserver-ha-434755 250m (3%) 0 (0%) 0 (0%) 0 (0%) 5m2s
kube-system kube-controller-manager-ha-434755 200m (2%) 0 (0%) 0 (0%) 0 (0%) 5m1s
kube-system kube-proxy-gzpg8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m57s
kube-system kube-scheduler-ha-434755 100m (1%) 0 (0%) 0 (0%) 0 (0%) 5m1s
kube-system kube-vip-ha-434755 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m5s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m57s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (11%) 100m (1%)
memory 290Mi (0%) 390Mi (1%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 4m55s kube-proxy
Normal NodeAllocatableEnforced 5m7s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 5m7s (x8 over 5m8s) kubelet Node ha-434755 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 5m7s (x8 over 5m8s) kubelet Node ha-434755 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 5m7s (x7 over 5m8s) kubelet Node ha-434755 status is now: NodeHasSufficientPID
Normal Starting 5m kubelet Starting kubelet.
Normal NodeAllocatableEnforced 5m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 5m kubelet Node ha-434755 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 5m kubelet Node ha-434755 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 5m kubelet Node ha-434755 status is now: NodeHasSufficientPID
Normal RegisteredNode 4m58s node-controller Node ha-434755 event: Registered Node ha-434755 in Controller
Normal RegisteredNode 4m29s node-controller Node ha-434755 event: Registered Node ha-434755 in Controller
Normal RegisteredNode 4m7s node-controller Node ha-434755 event: Registered Node ha-434755 in Controller
Name: ha-434755-m02
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-434755-m02
kubernetes.io/os=linux
minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53
minikube.k8s.io/name=ha-434755
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_19T22_25_17_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 19 Sep 2025 22:25:17 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-434755-m02
AcquireTime: <unset>
RenewTime: Fri, 19 Sep 2025 22:29:43 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 19 Sep 2025 22:25:37 +0000 Fri, 19 Sep 2025 22:25:17 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 19 Sep 2025 22:25:37 +0000 Fri, 19 Sep 2025 22:25:17 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 19 Sep 2025 22:25:37 +0000 Fri, 19 Sep 2025 22:25:17 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 19 Sep 2025 22:25:37 +0000 Fri, 19 Sep 2025 22:25:17 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.3
Hostname: ha-434755-m02
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
System Info:
Machine ID: 3f074940c6024fccb9ca090ae79eac96
System UUID: 515c6c02-eba2-449d-b3e2-53eaa5e2a2c5
Boot ID: f409d6b2-5b2d-482a-a418-1c1a417dfa0a
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (7 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system etcd-ha-434755-m02 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 4m27s
kube-system kindnet-74q9s 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 4m27s
kube-system kube-apiserver-ha-434755-m02 250m (3%) 0 (0%) 0 (0%) 0 (0%) 4m27s
kube-system kube-controller-manager-ha-434755-m02 200m (2%) 0 (0%) 0 (0%) 0 (0%) 4m27s
kube-system kube-proxy-4cnsm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m27s
kube-system kube-scheduler-ha-434755-m02 100m (1%) 0 (0%) 0 (0%) 0 (0%) 4m27s
kube-system kube-vip-ha-434755-m02 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m27s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 100m (1%)
memory 150Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 4m14s kube-proxy
Normal RegisteredNode 4m24s node-controller Node ha-434755-m02 event: Registered Node ha-434755-m02 in Controller
Normal RegisteredNode 4m23s node-controller Node ha-434755-m02 event: Registered Node ha-434755-m02 in Controller
Normal RegisteredNode 4m7s node-controller Node ha-434755-m02 event: Registered Node ha-434755-m02 in Controller
Name: ha-434755-m03
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=ha-434755-m03
kubernetes.io/os=linux
minikube.k8s.io/commit=6e37ee63f758843bb5fe33c3a528c564c4b83d53
minikube.k8s.io/name=ha-434755
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2025_09_19T22_25_39_0700
minikube.k8s.io/version=v1.37.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 19 Sep 2025 22:25:38 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-434755-m03
AcquireTime: <unset>
RenewTime: Fri, 19 Sep 2025 22:29:44 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 19 Sep 2025 22:26:09 +0000 Fri, 19 Sep 2025 22:25:38 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 19 Sep 2025 22:26:09 +0000 Fri, 19 Sep 2025 22:25:38 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 19 Sep 2025 22:26:09 +0000 Fri, 19 Sep 2025 22:25:38 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 19 Sep 2025 22:26:09 +0000 Fri, 19 Sep 2025 22:25:43 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.4
Hostname: ha-434755-m03
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32863452Ki
pods: 110
System Info:
Machine ID: 56ffdb437569490697f0dd38afc6a3b0
System UUID: d750116b-8986-4d1b-a4c8-19720c8ed559
Boot ID: f409d6b2-5b2d-482a-a418-1c1a417dfa0a
Kernel Version: 6.8.0-1037-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://28.4.0
Kubelet Version: v1.34.0
Kube-Proxy Version:
PodCIDR: 10.244.2.0/24
PodCIDRs: 10.244.2.0/24
Non-terminated Pods: (7 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system etcd-ha-434755-m03 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 4m1s
kube-system kindnet-jrkrv 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 4m6s
kube-system kube-apiserver-ha-434755-m03 250m (3%) 0 (0%) 0 (0%) 0 (0%) 4m1s
kube-system kube-controller-manager-ha-434755-m03 200m (2%) 0 (0%) 0 (0%) 0 (0%) 4m1s
kube-system kube-proxy-dzrbh 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m6s
kube-system kube-scheduler-ha-434755-m03 100m (1%) 0 (0%) 0 (0%) 0 (0%) 4m1s
kube-system kube-vip-ha-434755-m03 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m1s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 100m (1%)
memory 150Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal RegisteredNode 4m4s node-controller Node ha-434755-m03 event: Registered Node ha-434755-m03 in Controller
Normal RegisteredNode 4m3s node-controller Node ha-434755-m03 event: Registered Node ha-434755-m03 in Controller
Normal RegisteredNode 4m2s node-controller Node ha-434755-m03 event: Registered Node ha-434755-m03 in Controller
==> dmesg <==
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 56 4e c7 de 18 97 08 06
[ +3.920915] IPv4: martian source 10.244.0.1 from 10.244.0.26, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 26 69 01 69 2f bf 08 06
[Sep19 22:17] IPv4: martian source 10.244.0.1 from 10.244.0.27, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 92 b4 6c 9e 2e a2 08 06
[ +0.000434] IPv4: martian source 10.244.0.27 from 10.244.0.3, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff a2 5a a6 ac 71 28 08 06
[Sep19 22:18] IPv4: martian source 10.244.0.1 from 10.244.0.32, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 9e 5e 22 ac 7f b0 08 06
[ +0.000495] IPv4: martian source 10.244.0.32 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff a2 5a a6 ac 71 28 08 06
[ +0.000597] IPv4: martian source 10.244.0.32 from 10.244.0.8, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff f6 c3 58 35 ff 7f 08 06
[ +14.608947] IPv4: martian source 10.244.0.33 from 10.244.0.26, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 26 69 01 69 2f bf 08 06
[ +1.598945] IPv4: martian source 10.244.0.26 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff a2 5a a6 ac 71 28 08 06
[Sep19 22:20] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 12 b1 85 96 7b 86 08 06
[Sep19 22:22] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff fa 02 8f 31 b5 07 08 06
[Sep19 22:23] IPv4: martian source 10.244.0.1 from 10.244.0.5, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 52 66 98 c0 70 e0 08 06
[Sep19 22:24] IPv4: martian source 10.244.0.1 from 10.244.0.13, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 92 59 63 bf 9f 6e 08 06
==> etcd [f040530b1734] <==
{"level":"info","ts":"2025-09-19T22:25:32.268113Z","caller":"etcdserver/server.go:1838","msg":"sending merged snapshot","from":"aec36adc501070cc","to":"6088e2429f689fd8","bytes":1475095,"size":"1.5 MB"}
{"level":"info","ts":"2025-09-19T22:25:32.268302Z","caller":"rafthttp/snapshot_sender.go:82","msg":"sending database snapshot","snapshot-index":723,"remote-peer-id":"6088e2429f689fd8","bytes":1475095,"size":"1.5 MB"}
{"level":"info","ts":"2025-09-19T22:25:32.272009Z","caller":"etcdserver/snapshot_merge.go:64","msg":"sent database snapshot to writer","bytes":1466368,"size":"1.5 MB"}
{"level":"info","ts":"2025-09-19T22:25:32.274638Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"6088e2429f689fd8","stream-type":"stream Message"}
{"level":"info","ts":"2025-09-19T22:25:32.274740Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.276836Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"6088e2429f689fd8","stream-type":"stream MsgApp v2"}
{"level":"info","ts":"2025-09-19T22:25:32.276872Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.284009Z","caller":"rafthttp/snapshot_sender.go:131","msg":"sent database snapshot","snapshot-index":723,"remote-peer-id":"6088e2429f689fd8","bytes":1475095,"size":"1.5 MB"}
{"level":"warn","ts":"2025-09-19T22:25:32.294689Z","caller":"rafthttp/stream.go:420","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8","error":"EOF"}
{"level":"warn","ts":"2025-09-19T22:25:32.294789Z","caller":"rafthttp/stream.go:420","msg":"lost TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8","error":"EOF"}
{"level":"info","ts":"2025-09-19T22:25:32.314771Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"6088e2429f689fd8","stream-type":"stream MsgApp v2"}
{"level":"warn","ts":"2025-09-19T22:25:32.314816Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.314829Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.315431Z","caller":"rafthttp/stream.go:248","msg":"set message encoder","from":"aec36adc501070cc","to":"6088e2429f689fd8","stream-type":"stream Message"}
{"level":"warn","ts":"2025-09-19T22:25:32.315457Z","caller":"rafthttp/stream.go:264","msg":"closed TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.315465Z","caller":"rafthttp/stream.go:273","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.351210Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.354520Z","caller":"rafthttp/stream.go:411","msg":"established TCP streaming connection with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.514320Z","logger":"raft","caller":"v3@v3.6.0/raft.go:1981","msg":"aec36adc501070cc switched to configuration voters=(6956058400243883992 12222697724345399935 12593026477526642892)"}
{"level":"info","ts":"2025-09-19T22:25:32.514484Z","caller":"membership/cluster.go:550","msg":"promote member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","promoted-member-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:32.514566Z","caller":"etcdserver/server.go:1752","msg":"applied a configuration change through raft","local-member-id":"aec36adc501070cc","raft-conf-change":"ConfChangeAddNode","raft-conf-change-node-id":"6088e2429f689fd8"}
{"level":"info","ts":"2025-09-19T22:25:34.029285Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"a99fbed258953a7f","bytes":933879,"size":"934 kB","took":"30.016077713s"}
{"level":"info","ts":"2025-09-19T22:25:38.912832Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-19T22:25:44.676267Z","caller":"etcdserver/server.go:2246","msg":"skip compaction since there is an inflight snapshot"}
{"level":"info","ts":"2025-09-19T22:26:02.284428Z","caller":"etcdserver/server.go:1856","msg":"sent merged snapshot","from":"aec36adc501070cc","to":"6088e2429f689fd8","bytes":1475095,"size":"1.5 MB","took":"30.016313758s"}
==> kernel <==
22:29:44 up 1:12, 0 users, load average: 0.44, 4.23, 27.87
Linux ha-434755 6.8.0-1037-gcp #39~22.04.1-Ubuntu SMP Thu Aug 21 17:29:24 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [acbbcaa7a50e] <==
I0919 22:29:03.800106 1 main.go:324] Node ha-434755-m03 has CIDR [10.244.2.0/24]
I0919 22:29:13.792481 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:29:13.792539 1 main.go:301] handling current node
I0919 22:29:13.792556 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:29:13.792561 1 main.go:324] Node ha-434755-m02 has CIDR [10.244.1.0/24]
I0919 22:29:13.792859 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:29:13.792873 1 main.go:324] Node ha-434755-m03 has CIDR [10.244.2.0/24]
I0919 22:29:23.796613 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:29:23.796653 1 main.go:324] Node ha-434755-m02 has CIDR [10.244.1.0/24]
I0919 22:29:23.797221 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:29:23.797257 1 main.go:324] Node ha-434755-m03 has CIDR [10.244.2.0/24]
I0919 22:29:23.797450 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:29:23.797464 1 main.go:301] handling current node
I0919 22:29:33.799595 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:29:33.799631 1 main.go:324] Node ha-434755-m02 has CIDR [10.244.1.0/24]
I0919 22:29:33.799839 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:29:33.799852 1 main.go:324] Node ha-434755-m03 has CIDR [10.244.2.0/24]
I0919 22:29:33.799961 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:29:33.799975 1 main.go:301] handling current node
I0919 22:29:43.800602 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I0919 22:29:43.800641 1 main.go:301] handling current node
I0919 22:29:43.800661 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I0919 22:29:43.800668 1 main.go:324] Node ha-434755-m02 has CIDR [10.244.1.0/24]
I0919 22:29:43.800873 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I0919 22:29:43.800890 1 main.go:324] Node ha-434755-m03 has CIDR [10.244.2.0/24]
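The kindnet loop above repeatedly walks the Node objects and records each node's PodCIDR so routes to the other nodes' pod subnets can be programmed. A hedged client-go sketch of reading the same information (an illustration, not kindnet's own main.go) is:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes it runs inside the cluster, as the kindnet DaemonSet pods do.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	nodes, err := cs.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		// Mirrors "Handling node with IPs ..." / "Node ... has CIDR [...]" above.
		fmt.Printf("node %s pod CIDRs %v\n", n.Name, n.Spec.PodCIDRs)
	}
}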
==> kube-apiserver [baeef3d33381] <==
I0919 22:24:40.696152 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000
I0919 22:24:40.699966 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000
I0919 22:24:40.699987 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
I0919 22:24:41.126661 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io
I0919 22:24:41.164479 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io
I0919 22:24:41.300535 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"}
W0919 22:24:41.306999 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2]
I0919 22:24:41.308248 1 controller.go:667] quota admission added evaluator for: endpoints
I0919 22:24:41.312358 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io
I0919 22:24:41.730293 1 controller.go:667] quota admission added evaluator for: serviceaccounts
I0919 22:24:44.451829 1 controller.go:667] quota admission added evaluator for: deployments.apps
I0919 22:24:44.460659 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"}
I0919 22:24:44.467080 1 controller.go:667] quota admission added evaluator for: daemonsets.apps
I0919 22:24:47.036591 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0919 22:24:47.041406 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12
I0919 22:24:47.734451 1 controller.go:667] quota admission added evaluator for: replicasets.apps
I0919 22:24:47.782975 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps
I0919 22:25:42.022930 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:26:02.142559 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:27:03.352353 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:27:21.770448 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:28:25.641963 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:28:34.035829 1 stats.go:136] "Error getting keys" err="empty key: \"\""
I0919 22:29:43.682113 1 stats.go:136] "Error getting keys" err="empty key: \"\""
==> kube-controller-manager [9d7035076f5b] <==
I0919 22:24:46.729892 1 shared_informer.go:356] "Caches are synced" controller="resource_claim"
I0919 22:24:46.729917 1 shared_informer.go:356] "Caches are synced" controller="taint-eviction-controller"
I0919 22:24:46.730126 1 shared_informer.go:356] "Caches are synced" controller="PVC protection"
I0919 22:24:46.730563 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice_mirroring"
I0919 22:24:46.730598 1 shared_informer.go:356] "Caches are synced" controller="ClusterRoleAggregator"
I0919 22:24:46.730680 1 shared_informer.go:356] "Caches are synced" controller="stateful set"
I0919 22:24:46.731332 1 shared_informer.go:356] "Caches are synced" controller="deployment"
I0919 22:24:46.733702 1 shared_informer.go:356] "Caches are synced" controller="attach detach"
I0919 22:24:46.734879 1 shared_informer.go:356] "Caches are synced" controller="resource quota"
I0919 22:24:46.739793 1 shared_informer.go:356] "Caches are synced" controller="expand"
I0919 22:24:46.745067 1 shared_informer.go:356] "Caches are synced" controller="endpoint_slice"
I0919 22:24:46.756573 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0919 22:24:46.759762 1 shared_informer.go:356] "Caches are synced" controller="garbage collector"
I0919 22:24:46.759775 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0919 22:24:46.759781 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
E0919 22:25:16.502891 1 certificate_controller.go:151] "Unhandled Error" err="Sync csr-8gznq failed with : error updating signature for csr: Operation cannot be fulfilled on certificatesigningrequests.certificates.k8s.io \"csr-8gznq\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
I0919 22:25:16.953356 1 endpointslice_controller.go:344] "Error syncing endpoint slices for service, retrying" logger="endpointslice-controller" key="kube-system/kube-dns" err="failed to update kube-dns-btr4q EndpointSlice for Service kube-system/kube-dns: Operation cannot be fulfilled on endpointslices.discovery.k8s.io \"kube-dns-btr4q\": the object has been modified; please apply your changes to the latest version and try again"
I0919 22:25:16.953452 1 event.go:377] Event(v1.ObjectReference{Kind:"Service", Namespace:"kube-system", Name:"kube-dns", UID:"6bf58c8f-abca-468b-a2c7-04acb3bb338e", APIVersion:"v1", ResourceVersion:"309", FieldPath:""}): type: 'Warning' reason: 'FailedToUpdateEndpointSlices' Error updating Endpoint Slices for Service kube-system/kube-dns: failed to update kube-dns-btr4q EndpointSlice for Service kube-system/kube-dns: Operation cannot be fulfilled on endpointslices.discovery.k8s.io "kube-dns-btr4q": the object has been modified; please apply your changes to the latest version and try again
I0919 22:25:17.013440 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-434755-m02\" does not exist"
I0919 22:25:17.029166 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-434755-m02" podCIDRs=["10.244.1.0/24"]
I0919 22:25:21.734993 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-434755-m02"
E0919 22:25:38.070022 1 certificate_controller.go:151] "Unhandled Error" err="Sync csr-2nm58 failed with : error updating approval for csr: Operation cannot be fulfilled on certificatesigningrequests.certificates.k8s.io \"csr-2nm58\": the object has been modified; please apply your changes to the latest version and try again" logger="UnhandledError"
I0919 22:25:38.835123 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"ha-434755-m03\" does not exist"
I0919 22:25:38.849612 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="ha-434755-m03" podCIDRs=["10.244.2.0/24"]
I0919 22:25:41.746239 1 node_lifecycle_controller.go:873] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="ha-434755-m03"
==> kube-proxy [c4058cbf0779] <==
I0919 22:24:49.209419 1 server_linux.go:53] "Using iptables proxy"
I0919 22:24:49.290786 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache"
I0919 22:24:49.391927 1 shared_informer.go:356] "Caches are synced" controller="node informer cache"
I0919 22:24:49.391969 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.49.2"]
E0919 22:24:49.392097 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0919 22:24:49.414535 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0919 22:24:49.414600 1 server_linux.go:132] "Using iptables Proxier"
I0919 22:24:49.419756 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0919 22:24:49.420226 1 server.go:527] "Version info" version="v1.34.0"
I0919 22:24:49.420264 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0919 22:24:49.421883 1 config.go:403] "Starting serviceCIDR config controller"
I0919 22:24:49.421917 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config"
I0919 22:24:49.421937 1 config.go:200] "Starting service config controller"
I0919 22:24:49.421945 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config"
I0919 22:24:49.422002 1 config.go:106] "Starting endpoint slice config controller"
I0919 22:24:49.422054 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config"
I0919 22:24:49.422089 1 config.go:309] "Starting node config controller"
I0919 22:24:49.422095 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config"
I0919 22:24:49.522136 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config"
I0919 22:24:49.522161 1 shared_informer.go:356] "Caches are synced" controller="service config"
I0919 22:24:49.522187 1 shared_informer.go:356] "Caches are synced" controller="node config"
I0919 22:24:49.522304 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config"
==> kube-scheduler [3b75df9b742b] <==
E0919 22:24:39.747690 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim"
E0919 22:24:39.747769 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet"
E0919 22:24:39.747766 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode"
E0919 22:24:40.575330 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
E0919 22:24:40.592760 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap"
E0919 22:24:40.606110 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim"
E0919 22:24:40.613300 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
E0919 22:24:40.705675 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice"
E0919 22:24:40.757341 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment"
E0919 22:24:40.757342 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet"
E0919 22:24:40.789762 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod"
E0919 22:24:40.800954 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget"
E0919 22:24:40.811376 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity"
E0919 22:24:40.825276 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
E0919 22:24:40.860558 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume"
E0919 22:24:40.875460 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass"
I0919 22:24:43.743600 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E0919 22:25:17.048594 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kube-proxy-4cnsm\": pod kube-proxy-4cnsm is already assigned to node \"ha-434755-m02\"" plugin="DefaultBinder" pod="kube-system/kube-proxy-4cnsm" node="ha-434755-m02"
E0919 22:25:17.048715 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod a477a521-e24b-449d-854f-c873cb517164(kube-system/kube-proxy-4cnsm) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kube-proxy-4cnsm"
E0919 22:25:17.048747 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kube-proxy-4cnsm\": pod kube-proxy-4cnsm is already assigned to node \"ha-434755-m02\"" logger="UnhandledError" pod="kube-system/kube-proxy-4cnsm"
E0919 22:25:17.048815 1 framework.go:1400] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"kindnet-74q9s\": pod kindnet-74q9s is already assigned to node \"ha-434755-m02\"" plugin="DefaultBinder" pod="kube-system/kindnet-74q9s" node="ha-434755-m02"
E0919 22:25:17.048849 1 schedule_one.go:379] "scheduler cache ForgetPod failed" err="pod 06bab6e9-ad22-4651-947e-723307c31d04(kube-system/kindnet-74q9s) wasn't assumed so cannot be forgotten" logger="UnhandledError" pod="kube-system/kindnet-74q9s"
I0919 22:25:17.050318 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kube-proxy-4cnsm" node="ha-434755-m02"
E0919 22:25:17.050187 1 schedule_one.go:1079] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"kindnet-74q9s\": pod kindnet-74q9s is already assigned to node \"ha-434755-m02\"" logger="UnhandledError" pod="kube-system/kindnet-74q9s"
I0919 22:25:17.050575 1 schedule_one.go:1092] "Pod has been assigned to node. Abort adding it back to queue." pod="kube-system/kindnet-74q9s" node="ha-434755-m02"
==> kubelet <==
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.867473 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/9d9843d9-c2ca-4751-8af5-f8fc91cf07c9-kube-proxy\") pod \"kube-proxy-gzpg8\" (UID: \"9d9843d9-c2ca-4751-8af5-f8fc91cf07c9\") " pod="kube-system/kube-proxy-gzpg8"
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.867488 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/9d9843d9-c2ca-4751-8af5-f8fc91cf07c9-xtables-lock\") pod \"kube-proxy-gzpg8\" (UID: \"9d9843d9-c2ca-4751-8af5-f8fc91cf07c9\") " pod="kube-system/kube-proxy-gzpg8"
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.867528 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9d9843d9-c2ca-4751-8af5-f8fc91cf07c9-lib-modules\") pod \"kube-proxy-gzpg8\" (UID: \"9d9843d9-c2ca-4751-8af5-f8fc91cf07c9\") " pod="kube-system/kube-proxy-gzpg8"
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.867560 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/dd2c97ac-215c-4657-a3af-bf74603285af-lib-modules\") pod \"kindnet-djvx4\" (UID: \"dd2c97ac-215c-4657-a3af-bf74603285af\") " pod="kube-system/kindnet-djvx4"
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.867616 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mg64\" (UniqueName: \"kubernetes.io/projected/9d9843d9-c2ca-4751-8af5-f8fc91cf07c9-kube-api-access-5mg64\") pod \"kube-proxy-gzpg8\" (UID: \"9d9843d9-c2ca-4751-8af5-f8fc91cf07c9\") " pod="kube-system/kube-proxy-gzpg8"
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.967871 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54431fee-554c-4c3c-9c81-d779981d36db-config-volume\") pod \"coredns-66bc5c9577-w8trg\" (UID: \"54431fee-554c-4c3c-9c81-d779981d36db\") " pod="kube-system/coredns-66bc5c9577-w8trg"
Sep 19 22:24:47 ha-434755 kubelet[2465]: I0919 22:24:47.968112 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tk2k\" (UniqueName: \"kubernetes.io/projected/54431fee-554c-4c3c-9c81-d779981d36db-kube-api-access-8tk2k\") pod \"coredns-66bc5c9577-w8trg\" (UID: \"54431fee-554c-4c3c-9c81-d779981d36db\") " pod="kube-system/coredns-66bc5c9577-w8trg"
Sep 19 22:24:48 ha-434755 kubelet[2465]: I0919 22:24:48.069218 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0f31e1cc-6bbb-4987-93c7-48e61288b609-config-volume\") pod \"coredns-66bc5c9577-4lmln\" (UID: \"0f31e1cc-6bbb-4987-93c7-48e61288b609\") " pod="kube-system/coredns-66bc5c9577-4lmln"
Sep 19 22:24:48 ha-434755 kubelet[2465]: I0919 22:24:48.069281 2465 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxbd6\" (UniqueName: \"kubernetes.io/projected/0f31e1cc-6bbb-4987-93c7-48e61288b609-kube-api-access-xxbd6\") pod \"coredns-66bc5c9577-4lmln\" (UID: \"0f31e1cc-6bbb-4987-93c7-48e61288b609\") " pod="kube-system/coredns-66bc5c9577-4lmln"
Sep 19 22:24:48 ha-434755 kubelet[2465]: I0919 22:24:48.597179 2465 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.59714647 podStartE2EDuration="1.59714647s" podCreationTimestamp="2025-09-19 22:24:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:24:48.596804879 +0000 UTC m=+4.412561769" watchObservedRunningTime="2025-09-19 22:24:48.59714647 +0000 UTC m=+4.412903362"
Sep 19 22:24:49 ha-434755 kubelet[2465]: I0919 22:24:49.381213 2465 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-4lmln" podStartSLOduration=2.381182844 podStartE2EDuration="2.381182844s" podCreationTimestamp="2025-09-19 22:24:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:24:49.369703818 +0000 UTC m=+5.185460747" watchObservedRunningTime="2025-09-19 22:24:49.381182844 +0000 UTC m=+5.196939736"
Sep 19 22:24:49 ha-434755 kubelet[2465]: I0919 22:24:49.381451 2465 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-gzpg8" podStartSLOduration=2.381444212 podStartE2EDuration="2.381444212s" podCreationTimestamp="2025-09-19 22:24:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:24:49.381368165 +0000 UTC m=+5.197125048" watchObservedRunningTime="2025-09-19 22:24:49.381444212 +0000 UTC m=+5.197201101"
Sep 19 22:24:53 ha-434755 kubelet[2465]: I0919 22:24:53.429938 2465 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-w8trg" podStartSLOduration=6.429916905 podStartE2EDuration="6.429916905s" podCreationTimestamp="2025-09-19 22:24:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-09-19 22:24:49.399922361 +0000 UTC m=+5.215679245" watchObservedRunningTime="2025-09-19 22:24:53.429916905 +0000 UTC m=+9.245673795"
Sep 19 22:24:53 ha-434755 kubelet[2465]: I0919 22:24:53.430179 2465 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-djvx4" podStartSLOduration=2.5583203169999997 podStartE2EDuration="6.430170951s" podCreationTimestamp="2025-09-19 22:24:47 +0000 UTC" firstStartedPulling="2025-09-19 22:24:49.225935906 +0000 UTC m=+5.041692778" lastFinishedPulling="2025-09-19 22:24:53.097786536 +0000 UTC m=+8.913543412" observedRunningTime="2025-09-19 22:24:53.429847961 +0000 UTC m=+9.245604852" watchObservedRunningTime="2025-09-19 22:24:53.430170951 +0000 UTC m=+9.245927840"
Sep 19 22:24:54 ha-434755 kubelet[2465]: I0919 22:24:54.488942 2465 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24"
Sep 19 22:24:54 ha-434755 kubelet[2465]: I0919 22:24:54.490039 2465 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24"
Sep 19 22:25:02 ha-434755 kubelet[2465]: I0919 22:25:02.592732 2465 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de54ed5bb258a7d8937149fcb9be16e03e34cd6b8786d874a980e9f9ec26d429"
Sep 19 22:25:02 ha-434755 kubelet[2465]: I0919 22:25:02.617104 2465 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b987cc756018033717c69e468416998c2b07c3a7a6aab5e56b199bbd88fb51fe"
Sep 19 22:25:15 ha-434755 kubelet[2465]: I0919 22:25:15.870121 2465 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc57496cf8c97a97999359a9838b6036be50e94cb061c0b1a8b8d03c6c47882f"
Sep 19 22:25:15 ha-434755 kubelet[2465]: I0919 22:25:15.870167 2465 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62cd9dd3b99a779d6b1ffe72046bafeef3d781c016335de5886ea2ca70bf69d4"
Sep 19 22:25:15 ha-434755 kubelet[2465]: I0919 22:25:15.870191 2465 scope.go:117] "RemoveContainer" containerID="fd0a3ab5f285697717d070472745c94ac46d7e376804e2b2690d8192c539ce06"
Sep 19 22:25:15 ha-434755 kubelet[2465]: I0919 22:25:15.881409 2465 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89b975ea350c8ada63866afcc9dfe8d144799fa6442ff30b95e39235ca314606"
Sep 19 22:25:15 ha-434755 kubelet[2465]: I0919 22:25:15.881468 2465 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b69dcaba1fe3e6996e4b1abe588d8ed828c8e1b07e61838a54d5c6eea3a368de"
Sep 19 22:25:15 ha-434755 kubelet[2465]: I0919 22:25:15.883877 2465 scope.go:117] "RemoveContainer" containerID="f7365ae03012282e042fcdbb9d87e94b89928381e3b6f701b58d0e425f83b14a"
Sep 19 22:25:18 ha-434755 kubelet[2465]: I0919 22:25:18.938936 2465 scope.go:117] "RemoveContainer" containerID="7dcf79d61a67e78a7e98abac24d2bff68653f6f436028d21debd03806fd167ff"
-- /stdout --
helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p ha-434755 -n ha-434755
helpers_test.go:269: (dbg) Run: kubectl --context ha-434755 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:293: <<< TestMultiControlPlane/serial/StartCluster FAILED: end of post-mortem logs <<<
helpers_test.go:294: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiControlPlane/serial/StartCluster (324.63s)