=== RUN TestForceSystemdFlag
=== PAUSE TestForceSystemdFlag
=== CONT TestForceSystemdFlag
docker_test.go:91: (dbg) Run: out/minikube-linux-arm64 start -p force-systemd-flag-389625 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=docker
E0110 02:32:55.261227 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/functional-394803/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:12.146258 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/addons-991766/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:45.884914 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:45.890323 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:45.900733 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:45.920998 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:45.961291 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:46.041718 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:46.202126 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:46.522823 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:47.163780 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:48.444452 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:51.006158 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:34:56.126407 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:35:06.366837 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:35:26.847082 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:36:07.808692 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:36:09.096432 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/addons-991766/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:37:29.728903 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:37:55.261192 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/functional-394803/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
E0110 02:39:45.884463 2222877 cert_rotation.go:172] "Loading client cert failed" err="open /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/skaffold-245004/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
docker_test.go:91: (dbg) Non-zero exit: out/minikube-linux-arm64 start -p force-systemd-flag-389625 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=docker: exit status 109 (8m24.168711646s)
-- stdout --
* [force-systemd-flag-389625] minikube v1.37.0 on Ubuntu 20.04 (arm64)
- MINIKUBE_LOCATION=22414
- MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
- KUBECONFIG=/home/jenkins/minikube-integration/22414-2221005/kubeconfig
- MINIKUBE_HOME=/home/jenkins/minikube-integration/22414-2221005/.minikube
- MINIKUBE_BIN=out/minikube-linux-arm64
- MINIKUBE_FORCE_SYSTEMD=
* Using the docker driver based on user configuration
* Using Docker driver with root privileges
* Starting "force-systemd-flag-389625" primary control-plane node in "force-systemd-flag-389625" cluster
* Pulling base image v0.0.48-1767944074-22401 ...
-- /stdout --
** stderr **
I0110 02:31:31.403273 2444942 out.go:360] Setting OutFile to fd 1 ...
I0110 02:31:31.403569 2444942 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0110 02:31:31.403599 2444942 out.go:374] Setting ErrFile to fd 2...
I0110 02:31:31.403618 2444942 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0110 02:31:31.403919 2444942 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22414-2221005/.minikube/bin
I0110 02:31:31.404424 2444942 out.go:368] Setting JSON to false
I0110 02:31:31.405395 2444942 start.go:133] hostinfo: {"hostname":"ip-172-31-29-130","uptime":36841,"bootTime":1767975451,"procs":160,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I0110 02:31:31.405497 2444942 start.go:143] virtualization:
I0110 02:31:31.408819 2444942 out.go:179] * [force-systemd-flag-389625] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I0110 02:31:31.412885 2444942 out.go:179] - MINIKUBE_LOCATION=22414
I0110 02:31:31.412964 2444942 notify.go:221] Checking for updates...
I0110 02:31:31.425190 2444942 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0110 02:31:31.428163 2444942 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22414-2221005/kubeconfig
I0110 02:31:31.431030 2444942 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22414-2221005/.minikube
I0110 02:31:31.433941 2444942 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I0110 02:31:31.436853 2444942 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0110 02:31:31.440329 2444942 config.go:182] Loaded profile config "force-systemd-env-405089": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I0110 02:31:31.440445 2444942 driver.go:422] Setting default libvirt URI to qemu:///system
I0110 02:31:31.473277 2444942 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I0110 02:31:31.473389 2444942 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0110 02:31:31.569510 2444942 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:35 OomKillDisable:true NGoroutines:59 SystemTime:2026-01-10 02:31:31.559356986 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0110 02:31:31.569623 2444942 docker.go:319] overlay module found
I0110 02:31:31.577195 2444942 out.go:179] * Using the docker driver based on user configuration
I0110 02:31:31.580216 2444942 start.go:309] selected driver: docker
I0110 02:31:31.580239 2444942 start.go:928] validating driver "docker" against <nil>
I0110 02:31:31.580254 2444942 start.go:939] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0110 02:31:31.580972 2444942 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0110 02:31:31.685470 2444942 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:35 OomKillDisable:true NGoroutines:59 SystemTime:2026-01-10 02:31:31.673022095 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0110 02:31:31.685622 2444942 start_flags.go:333] no existing cluster config was found, will generate one from the flags
I0110 02:31:31.685842 2444942 start_flags.go:1001] Wait components to verify : map[apiserver:true system_pods:true]
I0110 02:31:31.695072 2444942 out.go:179] * Using Docker driver with root privileges
I0110 02:31:31.704472 2444942 cni.go:84] Creating CNI manager for ""
I0110 02:31:31.704566 2444942 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0110 02:31:31.704582 2444942 start_flags.go:342] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0110 02:31:31.704671 2444942 start.go:353] cluster config:
{Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I0110 02:31:31.716615 2444942 out.go:179] * Starting "force-systemd-flag-389625" primary control-plane node in "force-systemd-flag-389625" cluster
I0110 02:31:31.725232 2444942 cache.go:134] Beginning downloading kic base image for docker with docker
I0110 02:31:31.731542 2444942 out.go:179] * Pulling base image v0.0.48-1767944074-22401 ...
I0110 02:31:31.734740 2444942 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:31.734792 2444942 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4
I0110 02:31:31.734803 2444942 cache.go:65] Caching tarball of preloaded images
I0110 02:31:31.734922 2444942 preload.go:251] Found /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0110 02:31:31.734933 2444942 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0 on docker
I0110 02:31:31.735052 2444942 profile.go:143] Saving config to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/config.json ...
I0110 02:31:31.735070 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/config.json: {Name:mkf231dfddb62b8df14c42136e70d1c72c396e08 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:31.735223 2444942 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 in local docker daemon
I0110 02:31:31.768290 2444942 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 in local docker daemon, skipping pull
I0110 02:31:31.768314 2444942 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 exists in daemon, skipping load
I0110 02:31:31.768329 2444942 cache.go:243] Successfully downloaded all kic artifacts
I0110 02:31:31.768360 2444942 start.go:360] acquireMachinesLock for force-systemd-flag-389625: {Name:mkda4641748142b11aadec6867161d872c9610a8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0110 02:31:31.768468 2444942 start.go:364] duration metric: took 88.236µs to acquireMachinesLock for "force-systemd-flag-389625"
I0110 02:31:31.768503 2444942 start.go:93] Provisioning new machine with config: &{Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} &{Name: IP: Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0110 02:31:31.768575 2444942 start.go:125] createHost starting for "" (driver="docker")
I0110 02:31:31.770687 2444942 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0110 02:31:31.770958 2444942 start.go:159] libmachine.API.Create for "force-systemd-flag-389625" (driver="docker")
I0110 02:31:31.770996 2444942 client.go:173] LocalClient.Create starting
I0110 02:31:31.771061 2444942 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem
I0110 02:31:31.771107 2444942 main.go:144] libmachine: Decoding PEM data...
I0110 02:31:31.771131 2444942 main.go:144] libmachine: Parsing certificate...
I0110 02:31:31.771194 2444942 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem
I0110 02:31:31.771216 2444942 main.go:144] libmachine: Decoding PEM data...
I0110 02:31:31.771231 2444942 main.go:144] libmachine: Parsing certificate...
I0110 02:31:31.771599 2444942 cli_runner.go:164] Run: docker network inspect force-systemd-flag-389625 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0110 02:31:31.789231 2444942 cli_runner.go:211] docker network inspect force-systemd-flag-389625 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0110 02:31:31.789311 2444942 network_create.go:284] running [docker network inspect force-systemd-flag-389625] to gather additional debugging logs...
I0110 02:31:31.789330 2444942 cli_runner.go:164] Run: docker network inspect force-systemd-flag-389625
W0110 02:31:31.804491 2444942 cli_runner.go:211] docker network inspect force-systemd-flag-389625 returned with exit code 1
I0110 02:31:31.804519 2444942 network_create.go:287] error running [docker network inspect force-systemd-flag-389625]: docker network inspect force-systemd-flag-389625: exit status 1
stdout:
[]
stderr:
Error response from daemon: network force-systemd-flag-389625 not found
I0110 02:31:31.804531 2444942 network_create.go:289] output of [docker network inspect force-systemd-flag-389625]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network force-systemd-flag-389625 not found
** /stderr **
I0110 02:31:31.804633 2444942 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0110 02:31:31.821447 2444942 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-eeafa1ec40c7 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:52:dd:85:54:7e:14} reservation:<nil>}
I0110 02:31:31.821788 2444942 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-0306382db894 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:9a:0a:12:a6:69:af} reservation:<nil>}
I0110 02:31:31.822120 2444942 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-42f1ed7cacde IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:66:5d:25:88:ef:ef} reservation:<nil>}
I0110 02:31:31.822532 2444942 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001977430}
I0110 02:31:31.822549 2444942 network_create.go:124] attempt to create docker network force-systemd-flag-389625 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I0110 02:31:31.822614 2444942 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=force-systemd-flag-389625 force-systemd-flag-389625
I0110 02:31:31.879729 2444942 network_create.go:108] docker network force-systemd-flag-389625 192.168.76.0/24 created
I0110 02:31:31.879758 2444942 kic.go:121] calculated static IP "192.168.76.2" for the "force-systemd-flag-389625" container
I0110 02:31:31.879830 2444942 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0110 02:31:31.907715 2444942 cli_runner.go:164] Run: docker volume create force-systemd-flag-389625 --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --label created_by.minikube.sigs.k8s.io=true
I0110 02:31:31.939677 2444942 oci.go:103] Successfully created a docker volume force-systemd-flag-389625
I0110 02:31:31.939777 2444942 cli_runner.go:164] Run: docker run --rm --name force-systemd-flag-389625-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --entrypoint /usr/bin/test -v force-systemd-flag-389625:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -d /var/lib
I0110 02:31:33.763406 2444942 cli_runner.go:217] Completed: docker run --rm --name force-systemd-flag-389625-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --entrypoint /usr/bin/test -v force-systemd-flag-389625:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -d /var/lib: (1.823586252s)
I0110 02:31:33.763439 2444942 oci.go:107] Successfully prepared a docker volume force-systemd-flag-389625
I0110 02:31:33.763488 2444942 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:33.763505 2444942 kic.go:194] Starting extracting preloaded images to volume ...
I0110 02:31:33.763585 2444942 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v force-systemd-flag-389625:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -I lz4 -xf /preloaded.tar -C /extractDir
I0110 02:31:36.676943 2444942 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v force-systemd-flag-389625:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -I lz4 -xf /preloaded.tar -C /extractDir: (2.913316987s)
I0110 02:31:36.676976 2444942 kic.go:203] duration metric: took 2.913468033s to extract preloaded images to volume ...
W0110 02:31:36.677157 2444942 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0110 02:31:36.677267 2444942 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0110 02:31:36.733133 2444942 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname force-systemd-flag-389625 --name force-systemd-flag-389625 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=force-systemd-flag-389625 --network force-systemd-flag-389625 --ip 192.168.76.2 --volume force-systemd-flag-389625:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773
I0110 02:31:37.020083 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Running}}
I0110 02:31:37.049554 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Status}}
I0110 02:31:37.073410 2444942 cli_runner.go:164] Run: docker exec force-systemd-flag-389625 stat /var/lib/dpkg/alternatives/iptables
I0110 02:31:37.123872 2444942 oci.go:144] the created container "force-systemd-flag-389625" has a running status.
I0110 02:31:37.123914 2444942 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa...
I0110 02:31:37.219546 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0110 02:31:37.219643 2444942 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0110 02:31:37.246178 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Status}}
I0110 02:31:37.265663 2444942 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0110 02:31:37.265687 2444942 kic_runner.go:114] Args: [docker exec --privileged force-systemd-flag-389625 chown docker:docker /home/docker/.ssh/authorized_keys]
I0110 02:31:37.315490 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Status}}
I0110 02:31:37.344025 2444942 machine.go:94] provisionDockerMachine start ...
I0110 02:31:37.344113 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:37.365329 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:37.366213 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:37.366237 2444942 main.go:144] libmachine: About to run SSH command:
hostname
I0110 02:31:37.366917 2444942 main.go:144] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I0110 02:31:40.525424 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: force-systemd-flag-389625
I0110 02:31:40.525452 2444942 ubuntu.go:182] provisioning hostname "force-systemd-flag-389625"
I0110 02:31:40.525529 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:40.550883 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:40.551514 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:40.551534 2444942 main.go:144] libmachine: About to run SSH command:
sudo hostname force-systemd-flag-389625 && echo "force-systemd-flag-389625" | sudo tee /etc/hostname
I0110 02:31:40.741599 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: force-systemd-flag-389625
I0110 02:31:40.741787 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:40.769891 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:40.770349 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:40.770376 2444942 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sforce-systemd-flag-389625' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 force-systemd-flag-389625/g' /etc/hosts;
else
echo '127.0.1.1 force-systemd-flag-389625' | sudo tee -a /etc/hosts;
fi
fi
I0110 02:31:40.933268 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>:
I0110 02:31:40.933300 2444942 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22414-2221005/.minikube CaCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22414-2221005/.minikube}
I0110 02:31:40.933334 2444942 ubuntu.go:190] setting up certificates
I0110 02:31:40.933344 2444942 provision.go:84] configureAuth start
I0110 02:31:40.933425 2444942 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-flag-389625
I0110 02:31:40.954041 2444942 provision.go:143] copyHostCerts
I0110 02:31:40.954074 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem
I0110 02:31:40.954109 2444942 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem, removing ...
I0110 02:31:40.954115 2444942 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem
I0110 02:31:40.954187 2444942 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem (1082 bytes)
I0110 02:31:40.954287 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem
I0110 02:31:40.954306 2444942 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem, removing ...
I0110 02:31:40.954311 2444942 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem
I0110 02:31:40.954348 2444942 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem (1123 bytes)
I0110 02:31:40.954426 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem
I0110 02:31:40.954443 2444942 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem, removing ...
I0110 02:31:40.954447 2444942 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem
I0110 02:31:40.954472 2444942 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem (1679 bytes)
I0110 02:31:40.954527 2444942 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem org=jenkins.force-systemd-flag-389625 san=[127.0.0.1 192.168.76.2 force-systemd-flag-389625 localhost minikube]
I0110 02:31:41.170708 2444942 provision.go:177] copyRemoteCerts
I0110 02:31:41.170784 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0110 02:31:41.170832 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.191286 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:41.302379 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem -> /etc/docker/server.pem
I0110 02:31:41.302491 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem --> /etc/docker/server.pem (1241 bytes)
I0110 02:31:41.325187 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0110 02:31:41.325316 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0110 02:31:41.349568 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0110 02:31:41.349680 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0110 02:31:41.371181 2444942 provision.go:87] duration metric: took 437.80859ms to configureAuth
I0110 02:31:41.371265 2444942 ubuntu.go:206] setting minikube options for container-runtime
I0110 02:31:41.371507 2444942 config.go:182] Loaded profile config "force-systemd-flag-389625": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I0110 02:31:41.371603 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.397226 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:41.397537 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:41.397547 2444942 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0110 02:31:41.564217 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: overlay
I0110 02:31:41.564316 2444942 ubuntu.go:71] root file system type: overlay
I0110 02:31:41.564502 2444942 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0110 02:31:41.564636 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.591765 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:41.592086 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:41.592175 2444942 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0110 02:31:41.761531 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0110 02:31:41.761616 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.782449 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:41.782827 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:41.782851 2444942 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0110 02:31:43.042474 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-12-12 14:49:02.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2026-01-10 02:31:41.754593192 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0110 02:31:43.042496 2444942 machine.go:97] duration metric: took 5.698448584s to provisionDockerMachine
I0110 02:31:43.042508 2444942 client.go:176] duration metric: took 11.271502022s to LocalClient.Create
I0110 02:31:43.042522 2444942 start.go:167] duration metric: took 11.271565709s to libmachine.API.Create "force-systemd-flag-389625"
I0110 02:31:43.042529 2444942 start.go:293] postStartSetup for "force-systemd-flag-389625" (driver="docker")
I0110 02:31:43.042539 2444942 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0110 02:31:43.042594 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0110 02:31:43.042629 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.076614 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.196482 2444942 ssh_runner.go:195] Run: cat /etc/os-release
I0110 02:31:43.201700 2444942 main.go:144] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0110 02:31:43.201726 2444942 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I0110 02:31:43.201737 2444942 filesync.go:126] Scanning /home/jenkins/minikube-integration/22414-2221005/.minikube/addons for local assets ...
I0110 02:31:43.201796 2444942 filesync.go:126] Scanning /home/jenkins/minikube-integration/22414-2221005/.minikube/files for local assets ...
I0110 02:31:43.201877 2444942 filesync.go:149] local asset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> 22228772.pem in /etc/ssl/certs
I0110 02:31:43.201885 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> /etc/ssl/certs/22228772.pem
I0110 02:31:43.201986 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0110 02:31:43.214196 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem --> /etc/ssl/certs/22228772.pem (1708 bytes)
I0110 02:31:43.241904 2444942 start.go:296] duration metric: took 199.360809ms for postStartSetup
I0110 02:31:43.242273 2444942 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-flag-389625
I0110 02:31:43.263273 2444942 profile.go:143] Saving config to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/config.json ...
I0110 02:31:43.263543 2444942 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0110 02:31:43.263584 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.283380 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.391153 2444942 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0110 02:31:43.396781 2444942 start.go:128] duration metric: took 11.628189455s to createHost
I0110 02:31:43.396804 2444942 start.go:83] releasing machines lock for "force-systemd-flag-389625", held for 11.628322055s
I0110 02:31:43.396875 2444942 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-flag-389625
I0110 02:31:43.415596 2444942 ssh_runner.go:195] Run: cat /version.json
I0110 02:31:43.415661 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.415925 2444942 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0110 02:31:43.415983 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.442514 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.477676 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.711077 2444942 ssh_runner.go:195] Run: systemctl --version
I0110 02:31:43.721326 2444942 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W0110 02:31:43.726734 2444942 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0110 02:31:43.726807 2444942 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0110 02:31:43.760612 2444942 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I0110 02:31:43.760636 2444942 start.go:496] detecting cgroup driver to use...
I0110 02:31:43.760650 2444942 start.go:500] using "systemd" cgroup driver as enforced via flags
I0110 02:31:43.760747 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0110 02:31:43.776486 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0110 02:31:43.785831 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0110 02:31:43.795047 2444942 containerd.go:147] configuring containerd to use "systemd" as cgroup driver...
I0110 02:31:43.795106 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0110 02:31:43.804716 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0110 02:31:43.814084 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0110 02:31:43.823155 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0110 02:31:43.832515 2444942 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0110 02:31:43.841283 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0110 02:31:43.850677 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0110 02:31:43.859949 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
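Taken together, the sed edits above rewrite /etc/containerd/config.toml so the CRI plugin uses the systemd cgroup driver, the pinned pause image, the v2 runc shim and the standard CNI conf dir. The relevant fragment of the file would end up looking roughly like this (an illustrative sketch assembled from the commands logged above, not a dump of the real file):

    [plugins."io.containerd.grpc.v1.cri"]
      enable_unprivileged_ports = true
      restrict_oom_score_adj = false
      sandbox_image = "registry.k8s.io/pause:3.10.1"
      [plugins."io.containerd.grpc.v1.cri".cni]
        conf_dir = "/etc/cni/net.d"
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
        runtime_type = "io.containerd.runc.v2"
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
          SystemdCgroup = true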
I0110 02:31:43.869426 2444942 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0110 02:31:43.878026 2444942 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0110 02:31:43.886454 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:44.030564 2444942 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0110 02:31:44.134281 2444942 start.go:496] detecting cgroup driver to use...
I0110 02:31:44.134314 2444942 start.go:500] using "systemd" cgroup driver as enforced via flags
I0110 02:31:44.134390 2444942 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0110 02:31:44.164357 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0110 02:31:44.178141 2444942 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0110 02:31:44.203502 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0110 02:31:44.225293 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0110 02:31:44.259875 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0110 02:31:44.298197 2444942 ssh_runner.go:195] Run: which cri-dockerd
I0110 02:31:44.302282 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0110 02:31:44.310035 2444942 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0110 02:31:44.323184 2444942 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0110 02:31:44.479958 2444942 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0110 02:31:44.628745 2444942 docker.go:578] configuring docker to use "systemd" as cgroup driver...
I0110 02:31:44.628855 2444942 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
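The 129-byte daemon.json pushed here is not echoed into the log. Based on how minikube forces the Docker cgroup driver, its content would be roughly the following (illustrative sketch; the exact field set is an assumption, only the cgroupdriver exec-opt is implied by the surrounding log lines):

    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "log-driver": "json-file",
      "log-opts": {"max-size": "100m"},
      "storage-driver": "overlay2"
    }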
I0110 02:31:44.646424 2444942 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0110 02:31:44.659407 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:44.806969 2444942 ssh_runner.go:195] Run: sudo systemctl restart docker
I0110 02:31:45.429132 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0110 02:31:45.449741 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0110 02:31:45.466128 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0110 02:31:45.483936 2444942 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0110 02:31:45.652722 2444942 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0110 02:31:45.851372 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:46.020791 2444942 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0110 02:31:46.040175 2444942 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0110 02:31:46.054245 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:46.202922 2444942 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0110 02:31:46.282568 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0110 02:31:46.299250 2444942 start.go:553] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0110 02:31:46.299324 2444942 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0110 02:31:46.304150 2444942 start.go:574] Will wait 60s for crictl version
I0110 02:31:46.304219 2444942 ssh_runner.go:195] Run: which crictl
I0110 02:31:46.309882 2444942 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I0110 02:31:46.365333 2444942 start.go:590] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 29.1.3
RuntimeApiVersion: v1
I0110 02:31:46.365407 2444942 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0110 02:31:46.397294 2444942 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0110 02:31:46.430776 2444942 out.go:252] * Preparing Kubernetes v1.35.0 on Docker 29.1.3 ...
I0110 02:31:46.430856 2444942 cli_runner.go:164] Run: docker network inspect force-systemd-flag-389625 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0110 02:31:46.446745 2444942 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I0110 02:31:46.450899 2444942 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
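The guarded rewrite above drops any stale host.minikube.internal entry and appends a fresh one, so the guest's /etc/hosts gains a single line:

    192.168.76.1 host.minikube.internal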
I0110 02:31:46.460438 2444942 kubeadm.go:884] updating cluster {Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I0110 02:31:46.460546 2444942 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:46.460598 2444942 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0110 02:31:46.482795 2444942 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0110 02:31:46.482816 2444942 docker.go:624] Images already preloaded, skipping extraction
I0110 02:31:46.482894 2444942 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0110 02:31:46.503709 2444942 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0110 02:31:46.503732 2444942 cache_images.go:86] Images are preloaded, skipping loading
I0110 02:31:46.503741 2444942 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.35.0 docker true true} ...
I0110 02:31:46.503828 2444942 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=force-systemd-flag-389625 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0110 02:31:46.503890 2444942 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
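This docker info query is the check that ultimately matters for --force-systemd: the daemon configured above should now report the systemd cgroup driver. Run by hand against the same node it would look like this (manual verification sketch, not part of the test):

    docker exec force-systemd-flag-389625 docker info --format '{{.CgroupDriver}}'
    # expected output: systemd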
I0110 02:31:46.568277 2444942 cni.go:84] Creating CNI manager for ""
I0110 02:31:46.568357 2444942 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0110 02:31:46.568393 2444942 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I0110 02:31:46.568445 2444942 kubeadm.go:197] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.35.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:force-systemd-flag-389625 NodeName:force-systemd-flag-389625 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0110 02:31:46.568620 2444942 kubeadm.go:203] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.76.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "force-systemd-flag-389625"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.76.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
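If one wanted to exercise this generated config by hand before the test drives kubeadm init, a dry run against the same file validates it without touching node state (a manual step, not something minikube performs here):

    sudo /var/lib/minikube/binaries/v1.35.0/kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run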
I0110 02:31:46.568728 2444942 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0
I0110 02:31:46.576738 2444942 binaries.go:51] Found k8s binaries, skipping transfer
I0110 02:31:46.576804 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0110 02:31:46.584333 2444942 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (324 bytes)
I0110 02:31:46.597086 2444942 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0110 02:31:46.609903 2444942 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
I0110 02:31:46.623198 2444942 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I0110 02:31:46.627340 2444942 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0110 02:31:46.637410 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:46.813351 2444942 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0110 02:31:46.853529 2444942 certs.go:69] Setting up /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625 for IP: 192.168.76.2
I0110 02:31:46.853605 2444942 certs.go:195] generating shared ca certs ...
I0110 02:31:46.853636 2444942 certs.go:227] acquiring lock for ca certs: {Name:mk3365aee58ca444945faa08aa6e1c1a1b730f09 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:46.853847 2444942 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key
I0110 02:31:46.853930 2444942 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key
I0110 02:31:46.853957 2444942 certs.go:257] generating profile certs ...
I0110 02:31:46.854046 2444942 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.key
I0110 02:31:46.854089 2444942 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.crt with IP's: []
I0110 02:31:46.947349 2444942 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.crt ...
I0110 02:31:46.947424 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.crt: {Name:mkc2a0e18aeb9bc161a2b7bdc69edce7c225059e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:46.947656 2444942 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.key ...
I0110 02:31:46.947692 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.key: {Name:mkbec37be7fe98f01eeac1efcff3341ee3c0872e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:46.947838 2444942 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11
I0110 02:31:46.947881 2444942 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I0110 02:31:47.211172 2444942 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11 ...
I0110 02:31:47.211243 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11: {Name:mkb26b4fa8a855d6ab75cf6ae5986179421e433d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.211463 2444942 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11 ...
I0110 02:31:47.211500 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11: {Name:mkaede7629652a36b550448eb511dc667db770a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.211648 2444942 certs.go:382] copying /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11 -> /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt
I0110 02:31:47.211795 2444942 certs.go:386] copying /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11 -> /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key
I0110 02:31:47.211904 2444942 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key
I0110 02:31:47.211947 2444942 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt with IP's: []
I0110 02:31:47.431675 2444942 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt ...
I0110 02:31:47.431751 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt: {Name:mkf0c56bc6a962d35ef411e8b1db0da0dee06e76 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.431961 2444942 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key ...
I0110 02:31:47.431997 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key: {Name:mk1b1a2249d88d087b490ca8bc1af9bab6c5cd65 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.432136 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0110 02:31:47.432180 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0110 02:31:47.432212 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0110 02:31:47.432258 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0110 02:31:47.432293 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0110 02:31:47.432322 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0110 02:31:47.432364 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0110 02:31:47.432398 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0110 02:31:47.432482 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem (1338 bytes)
W0110 02:31:47.432539 2444942 certs.go:480] ignoring /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877_empty.pem, impossibly tiny 0 bytes
I0110 02:31:47.432564 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem (1675 bytes)
I0110 02:31:47.432623 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem (1082 bytes)
I0110 02:31:47.432673 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem (1123 bytes)
I0110 02:31:47.432730 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem (1679 bytes)
I0110 02:31:47.432801 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem (1708 bytes)
I0110 02:31:47.432861 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.432896 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem -> /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.432926 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.433610 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0110 02:31:47.453555 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0110 02:31:47.472772 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0110 02:31:47.493487 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0110 02:31:47.513383 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0110 02:31:47.534626 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0110 02:31:47.554446 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0110 02:31:47.574178 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0110 02:31:47.594420 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0110 02:31:47.614798 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem --> /usr/share/ca-certificates/2222877.pem (1338 bytes)
I0110 02:31:47.635266 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem --> /usr/share/ca-certificates/22228772.pem (1708 bytes)
I0110 02:31:47.655406 2444942 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I0110 02:31:47.670021 2444942 ssh_runner.go:195] Run: openssl version
I0110 02:31:47.676614 2444942 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.684815 2444942 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I0110 02:31:47.693216 2444942 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.697583 2444942 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jan 10 01:54 /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.697646 2444942 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.771210 2444942 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I0110 02:31:47.792458 2444942 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0
I0110 02:31:47.806445 2444942 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.828400 2444942 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/2222877.pem /etc/ssl/certs/2222877.pem
I0110 02:31:47.841461 2444942 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.847202 2444942 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Jan 10 02:00 /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.847317 2444942 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.889947 2444942 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I0110 02:31:47.898442 2444942 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/2222877.pem /etc/ssl/certs/51391683.0
I0110 02:31:47.910391 2444942 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.918871 2444942 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/22228772.pem /etc/ssl/certs/22228772.pem
I0110 02:31:47.928363 2444942 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.932866 2444942 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Jan 10 02:00 /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.932981 2444942 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.975611 2444942 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I0110 02:31:47.984122 2444942 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/22228772.pem /etc/ssl/certs/3ec20f2e.0
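The link names used above (b5213941.0, 51391683.0, 3ec20f2e.0) are the OpenSSL subject-hash values of the respective certificates, which is how the system trust store looks certificates up under /etc/ssl/certs. Reproducing the first pairing by hand would look like this (hash value taken from the commands in this log):

    openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941
    sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0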
I0110 02:31:47.992727 2444942 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0110 02:31:47.997508 2444942 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0110 02:31:47.997608 2444942 kubeadm.go:401] StartCluster: {Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I0110 02:31:47.997780 2444942 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0110 02:31:48.015607 2444942 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0110 02:31:48.027609 2444942 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0110 02:31:48.037195 2444942 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I0110 02:31:48.037364 2444942 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0110 02:31:48.049830 2444942 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0110 02:31:48.049901 2444942 kubeadm.go:158] found existing configuration files:
I0110 02:31:48.049986 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0110 02:31:48.059872 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0110 02:31:48.059993 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0110 02:31:48.068889 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0110 02:31:48.079048 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0110 02:31:48.079166 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0110 02:31:48.088092 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0110 02:31:48.098007 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0110 02:31:48.098121 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0110 02:31:48.107267 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0110 02:31:48.117920 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0110 02:31:48.118032 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0110 02:31:48.127917 2444942 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0110 02:31:48.180767 2444942 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I0110 02:31:48.180909 2444942 kubeadm.go:319] [preflight] Running pre-flight checks
I0110 02:31:48.290339 2444942 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I0110 02:31:48.290624 2444942 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I0110 02:31:48.290676 2444942 kubeadm.go:319] OS: Linux
I0110 02:31:48.290728 2444942 kubeadm.go:319] CGROUPS_CPU: enabled
I0110 02:31:48.290780 2444942 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I0110 02:31:48.290831 2444942 kubeadm.go:319] CGROUPS_CPUSET: enabled
I0110 02:31:48.290894 2444942 kubeadm.go:319] CGROUPS_DEVICES: enabled
I0110 02:31:48.290946 2444942 kubeadm.go:319] CGROUPS_FREEZER: enabled
I0110 02:31:48.291013 2444942 kubeadm.go:319] CGROUPS_MEMORY: enabled
I0110 02:31:48.291064 2444942 kubeadm.go:319] CGROUPS_PIDS: enabled
I0110 02:31:48.291119 2444942 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I0110 02:31:48.291170 2444942 kubeadm.go:319] CGROUPS_BLKIO: enabled
I0110 02:31:48.376921 2444942 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I0110 02:31:48.377171 2444942 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0110 02:31:48.377352 2444942 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0110 02:31:48.409493 2444942 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0110 02:31:48.416465 2444942 out.go:252] - Generating certificates and keys ...
I0110 02:31:48.416688 2444942 kubeadm.go:319] [certs] Using existing ca certificate authority
I0110 02:31:48.416848 2444942 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I0110 02:31:48.613948 2444942 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I0110 02:31:49.073506 2444942 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I0110 02:31:49.428686 2444942 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I0110 02:31:49.712507 2444942 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I0110 02:31:49.836655 2444942 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I0110 02:31:49.837353 2444942 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
I0110 02:31:50.119233 2444942 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I0110 02:31:50.120016 2444942 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
I0110 02:31:50.479427 2444942 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I0110 02:31:50.633494 2444942 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I0110 02:31:50.705818 2444942 kubeadm.go:319] [certs] Generating "sa" key and public key
I0110 02:31:50.706064 2444942 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0110 02:31:50.768089 2444942 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I0110 02:31:50.918537 2444942 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0110 02:31:51.105411 2444942 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0110 02:31:51.794074 2444942 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0110 02:31:52.020214 2444942 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0110 02:31:52.020319 2444942 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0110 02:31:52.025960 2444942 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0110 02:31:52.029579 2444942 out.go:252] - Booting up control plane ...
I0110 02:31:52.029696 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0110 02:31:52.030816 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0110 02:31:52.032102 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0110 02:31:52.049145 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0110 02:31:52.049263 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0110 02:31:52.057814 2444942 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0110 02:31:52.058122 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0110 02:31:52.058167 2444942 kubeadm.go:319] [kubelet-start] Starting the kubelet
I0110 02:31:52.196343 2444942 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0110 02:31:52.196468 2444942 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0110 02:35:52.196251 2444942 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.000438294s
I0110 02:35:52.196284 2444942 kubeadm.go:319]
I0110 02:35:52.196342 2444942 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I0110 02:35:52.196375 2444942 kubeadm.go:319] - The kubelet is not running
I0110 02:35:52.196480 2444942 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I0110 02:35:52.196486 2444942 kubeadm.go:319]
I0110 02:35:52.196591 2444942 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I0110 02:35:52.196622 2444942 kubeadm.go:319] - 'systemctl status kubelet'
I0110 02:35:52.196653 2444942 kubeadm.go:319] - 'journalctl -xeu kubelet'
I0110 02:35:52.196658 2444942 kubeadm.go:319]
I0110 02:35:52.202848 2444942 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0110 02:35:52.203270 2444942 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I0110 02:35:52.203377 2444942 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0110 02:35:52.203640 2444942 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
I0110 02:35:52.203646 2444942 kubeadm.go:319]
I0110 02:35:52.203714 2444942 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
W0110 02:35:52.203844 2444942 out.go:285] ! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000438294s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000438294s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
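At this point the root cause is still unknown; kubeadm only reports that the kubelet never answered on 127.0.0.1:10248 within 4 minutes. A manual follow-up would normally start with the commands kubeadm itself suggests, executed inside the kic container while it is still running (a troubleshooting sketch, not part of the test run):

    docker exec force-systemd-flag-389625 systemctl status kubelet --no-pager
    docker exec force-systemd-flag-389625 journalctl -xeu kubelet --no-pager | tail -n 50
    docker exec force-systemd-flag-389625 curl -sS http://127.0.0.1:10248/healthz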
I0110 02:35:52.203917 2444942 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I0110 02:35:52.668064 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0110 02:35:52.684406 2444942 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I0110 02:35:52.684471 2444942 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0110 02:35:52.694960 2444942 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0110 02:35:52.695030 2444942 kubeadm.go:158] found existing configuration files:
I0110 02:35:52.695114 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0110 02:35:52.703880 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0110 02:35:52.703940 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0110 02:35:52.712165 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0110 02:35:52.721863 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0110 02:35:52.721985 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0110 02:35:52.731171 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0110 02:35:52.740287 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0110 02:35:52.740404 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0110 02:35:52.748618 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0110 02:35:52.757969 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0110 02:35:52.758029 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0110 02:35:52.766204 2444942 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0110 02:35:52.819064 2444942 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I0110 02:35:52.819481 2444942 kubeadm.go:319] [preflight] Running pre-flight checks
I0110 02:35:52.927559 2444942 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I0110 02:35:52.927642 2444942 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I0110 02:35:52.927679 2444942 kubeadm.go:319] OS: Linux
I0110 02:35:52.927725 2444942 kubeadm.go:319] CGROUPS_CPU: enabled
I0110 02:35:52.927773 2444942 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I0110 02:35:52.927829 2444942 kubeadm.go:319] CGROUPS_CPUSET: enabled
I0110 02:35:52.927879 2444942 kubeadm.go:319] CGROUPS_DEVICES: enabled
I0110 02:35:52.927933 2444942 kubeadm.go:319] CGROUPS_FREEZER: enabled
I0110 02:35:52.927982 2444942 kubeadm.go:319] CGROUPS_MEMORY: enabled
I0110 02:35:52.928027 2444942 kubeadm.go:319] CGROUPS_PIDS: enabled
I0110 02:35:52.928076 2444942 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I0110 02:35:52.928122 2444942 kubeadm.go:319] CGROUPS_BLKIO: enabled
I0110 02:35:53.012278 2444942 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I0110 02:35:53.012391 2444942 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0110 02:35:53.012483 2444942 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0110 02:35:53.037432 2444942 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0110 02:35:53.040921 2444942 out.go:252] - Generating certificates and keys ...
I0110 02:35:53.041059 2444942 kubeadm.go:319] [certs] Using existing ca certificate authority
I0110 02:35:53.041136 2444942 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I0110 02:35:53.041218 2444942 kubeadm.go:319] [certs] Using existing apiserver-kubelet-client certificate and key on disk
I0110 02:35:53.041284 2444942 kubeadm.go:319] [certs] Using existing front-proxy-ca certificate authority
I0110 02:35:53.041359 2444942 kubeadm.go:319] [certs] Using existing front-proxy-client certificate and key on disk
I0110 02:35:53.041417 2444942 kubeadm.go:319] [certs] Using existing etcd/ca certificate authority
I0110 02:35:53.041484 2444942 kubeadm.go:319] [certs] Using existing etcd/server certificate and key on disk
I0110 02:35:53.041550 2444942 kubeadm.go:319] [certs] Using existing etcd/peer certificate and key on disk
I0110 02:35:53.041630 2444942 kubeadm.go:319] [certs] Using existing etcd/healthcheck-client certificate and key on disk
I0110 02:35:53.041707 2444942 kubeadm.go:319] [certs] Using existing apiserver-etcd-client certificate and key on disk
I0110 02:35:53.041749 2444942 kubeadm.go:319] [certs] Using the existing "sa" key
I0110 02:35:53.041814 2444942 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0110 02:35:53.331718 2444942 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I0110 02:35:53.451638 2444942 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0110 02:35:53.804134 2444942 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0110 02:35:54.036793 2444942 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0110 02:35:54.605846 2444942 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0110 02:35:54.606454 2444942 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0110 02:35:54.608995 2444942 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0110 02:35:54.612162 2444942 out.go:252] - Booting up control plane ...
I0110 02:35:54.612265 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0110 02:35:54.612343 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0110 02:35:54.612409 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0110 02:35:54.632870 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0110 02:35:54.633407 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0110 02:35:54.640913 2444942 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0110 02:35:54.641255 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0110 02:35:54.641302 2444942 kubeadm.go:319] [kubelet-start] Starting the kubelet
I0110 02:35:54.777508 2444942 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0110 02:35:54.777628 2444942 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0110 02:39:54.778464 2444942 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.001115013s
I0110 02:39:54.778491 2444942 kubeadm.go:319]
I0110 02:39:54.778555 2444942 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I0110 02:39:54.778601 2444942 kubeadm.go:319] - The kubelet is not running
I0110 02:39:54.778725 2444942 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I0110 02:39:54.778735 2444942 kubeadm.go:319]
I0110 02:39:54.778847 2444942 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I0110 02:39:54.778883 2444942 kubeadm.go:319] - 'systemctl status kubelet'
I0110 02:39:54.778919 2444942 kubeadm.go:319] - 'journalctl -xeu kubelet'
I0110 02:39:54.778927 2444942 kubeadm.go:319]
I0110 02:39:54.783246 2444942 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0110 02:39:54.783712 2444942 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I0110 02:39:54.783842 2444942 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0110 02:39:54.784133 2444942 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
I0110 02:39:54.784143 2444942 kubeadm.go:319]
I0110 02:39:54.784229 2444942 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
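The [kubelet-check] phase above is just a repeated HTTP GET against the kubelet's local healthz endpoint on 127.0.0.1:10248. A minimal Go sketch of the same probe (illustrative only, not kubeadm or minikube code; the address comes from the log above, the 5s client timeout is an arbitrary choice):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	// Same endpoint kubeadm polls during [kubelet-check]; "connection refused"
	// here means the kubelet process is not listening at all.
	resp, err := client.Get("http://127.0.0.1:10248/healthz")
	if err != nil {
		fmt.Println("kubelet not healthy:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("kubelet healthz: %s %s\n", resp.Status, string(body))
}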
I0110 02:39:54.784293 2444942 kubeadm.go:403] duration metric: took 8m6.786690861s to StartCluster
I0110 02:39:54.784334 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0110 02:39:54.784409 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-apiserver
I0110 02:39:54.833811 2444942 cri.go:96] found id: ""
I0110 02:39:54.833848 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.833857 2444942 logs.go:284] No container was found matching "kube-apiserver"
I0110 02:39:54.833864 2444942 cri.go:61] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0110 02:39:54.833927 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=etcd
I0110 02:39:54.874597 2444942 cri.go:96] found id: ""
I0110 02:39:54.874676 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.874698 2444942 logs.go:284] No container was found matching "etcd"
I0110 02:39:54.874717 2444942 cri.go:61] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0110 02:39:54.874799 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=coredns
I0110 02:39:54.907340 2444942 cri.go:96] found id: ""
I0110 02:39:54.907364 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.907372 2444942 logs.go:284] No container was found matching "coredns"
I0110 02:39:54.907379 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0110 02:39:54.907439 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-scheduler
I0110 02:39:54.942974 2444942 cri.go:96] found id: ""
I0110 02:39:54.943001 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.943010 2444942 logs.go:284] No container was found matching "kube-scheduler"
I0110 02:39:54.943018 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0110 02:39:54.943077 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-proxy
I0110 02:39:54.981427 2444942 cri.go:96] found id: ""
I0110 02:39:54.981449 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.981458 2444942 logs.go:284] No container was found matching "kube-proxy"
I0110 02:39:54.981465 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0110 02:39:54.981531 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-controller-manager
I0110 02:39:55.041924 2444942 cri.go:96] found id: ""
I0110 02:39:55.041946 2444942 logs.go:282] 0 containers: []
W0110 02:39:55.041994 2444942 logs.go:284] No container was found matching "kube-controller-manager"
I0110 02:39:55.042004 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0110 02:39:55.042072 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kindnet
I0110 02:39:55.114566 2444942 cri.go:96] found id: ""
I0110 02:39:55.114587 2444942 logs.go:282] 0 containers: []
W0110 02:39:55.114596 2444942 logs.go:284] No container was found matching "kindnet"
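Each lookup above shells out to crictl and treats empty output as "no container found". A rough Go sketch of that check (a hypothetical helper, not the actual cri.go code; the crictl flags are copied from the Run lines above):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// hasContainer reports whether crictl knows any container whose name matches name,
// mirroring the `sudo crictl --timeout=10s ps -a --quiet --name=<name>` calls above.
func hasContainer(name string) (bool, error) {
	out, err := exec.Command("sudo", "crictl", "--timeout=10s", "ps", "-a", "--quiet", "--name="+name).Output()
	if err != nil {
		return false, err
	}
	return len(strings.Fields(string(out))) > 0, nil
}

func main() {
	for _, name := range []string{"kube-apiserver", "etcd", "kube-scheduler", "kube-controller-manager"} {
		found, err := hasContainer(name)
		fmt.Printf("%s: found=%v err=%v\n", name, found, err)
	}
}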
I0110 02:39:55.114606 2444942 logs.go:123] Gathering logs for describe nodes ...
I0110 02:39:55.114634 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W0110 02:39:55.229791 2444942 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E0110 02:39:55.208165 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.208559 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.217197 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.218001 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.222039 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
E0110 02:39:55.208165 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.208559 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.217197 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.218001 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.222039 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I0110 02:39:55.229812 2444942 logs.go:123] Gathering logs for Docker ...
I0110 02:39:55.229837 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0110 02:39:55.267290 2444942 logs.go:123] Gathering logs for container status ...
I0110 02:39:55.267338 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0110 02:39:55.359988 2444942 logs.go:123] Gathering logs for kubelet ...
I0110 02:39:55.360018 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0110 02:39:55.456371 2444942 logs.go:123] Gathering logs for dmesg ...
I0110 02:39:55.456405 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
W0110 02:39:55.476932 2444942 out.go:434] Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001115013s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:55.476973 2444942 out.go:285] *
*
W0110 02:39:55.477022 2444942 out.go:285] X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001115013s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:55.477184 2444942 out.go:285] *
*
W0110 02:39:55.477459 2444942 out.go:308] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0110 02:39:55.484573 2444942 out.go:203]
W0110 02:39:55.488432 2444942 out.go:285] X Exiting due to K8S_KUBELET_NOT_RUNNING: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001115013s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:55.488495 2444942 out.go:285] * Suggestion: Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start
W0110 02:39:55.488519 2444942 out.go:285] * Related issue: https://github.com/kubernetes/minikube/issues/4172
I0110 02:39:55.491695 2444942 out.go:203]
** /stderr **
docker_test.go:93: failed to start minikube with args: "out/minikube-linux-arm64 start -p force-systemd-flag-389625 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=docker" : exit status 109
docker_test.go:110: (dbg) Run: out/minikube-linux-arm64 -p force-systemd-flag-389625 ssh "docker info --format {{.CgroupDriver}}"
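The ssh command above asks the Docker daemon inside the node for its cgroup driver; with --force-systemd the test expects the answer to be "systemd". A small Go sketch of that verification (a hypothetical stand-alone check, not the docker_test.go code; the binary path and profile name are taken from the log):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same command the test runs: minikube -p <profile> ssh "docker info --format {{.CgroupDriver}}"
	out, err := exec.Command("out/minikube-linux-arm64", "-p", "force-systemd-flag-389625",
		"ssh", "docker info --format {{.CgroupDriver}}").CombinedOutput()
	if err != nil {
		fmt.Println("ssh failed:", err, string(out))
		return
	}
	driver := strings.TrimSpace(string(out))
	if driver != "systemd" {
		fmt.Printf("expected cgroup driver \"systemd\", got %q\n", driver)
		return
	}
	fmt.Println("cgroup driver is systemd")
}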
docker_test.go:106: *** TestForceSystemdFlag FAILED at 2026-01-10 02:39:56.082127666 +0000 UTC m=+2780.290215509
helpers_test.go:223: -----------------------post-mortem--------------------------------
helpers_test.go:224: ======> post-mortem[TestForceSystemdFlag]: network settings <======
helpers_test.go:231: HOST ENV snapshots: PROXY env: HTTP_PROXY="<empty>" HTTPS_PROXY="<empty>" NO_PROXY="<empty>"
helpers_test.go:239: ======> post-mortem[TestForceSystemdFlag]: docker inspect <======
helpers_test.go:240: (dbg) Run: docker inspect force-systemd-flag-389625
helpers_test.go:244: (dbg) docker inspect force-systemd-flag-389625:
-- stdout --
[
{
"Id": "c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07",
"Created": "2026-01-10T02:31:36.748439063Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 2446115,
"ExitCode": 0,
"Error": "",
"StartedAt": "2026-01-10T02:31:36.807266698Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:61b418c457107ee7d9335f5e03d8e7ecced6bcc2627a71ae5411ca466c7b614b",
"ResolvConfPath": "/var/lib/docker/containers/c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07/hostname",
"HostsPath": "/var/lib/docker/containers/c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07/hosts",
"LogPath": "/var/lib/docker/containers/c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07/c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07-json.log",
"Name": "/force-systemd-flag-389625",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"force-systemd-flag-389625:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "force-systemd-flag-389625",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 3221225472,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 6442450944,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"ID": "c756577a6f6a66791f7adeb9d3115dfe4eeccdd8300730bb86214a9483838d07",
"LowerDir": "/var/lib/docker/overlay2/c8ed87a7a55b3a8d25106eba6a27b7e72ac056695741710f830c1cf815bcbb12-init/diff:/var/lib/docker/overlay2/3279adf6388395c7fd34e962c09da15366b225a7b796d4f2275704eeca225de8/diff",
"MergedDir": "/var/lib/docker/overlay2/c8ed87a7a55b3a8d25106eba6a27b7e72ac056695741710f830c1cf815bcbb12/merged",
"UpperDir": "/var/lib/docker/overlay2/c8ed87a7a55b3a8d25106eba6a27b7e72ac056695741710f830c1cf815bcbb12/diff",
"WorkDir": "/var/lib/docker/overlay2/c8ed87a7a55b3a8d25106eba6a27b7e72ac056695741710f830c1cf815bcbb12/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "force-systemd-flag-389625",
"Source": "/var/lib/docker/volumes/force-systemd-flag-389625/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "force-systemd-flag-389625",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "force-systemd-flag-389625",
"name.minikube.sigs.k8s.io": "force-systemd-flag-389625",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "b17fe84301aa8a8771e83ad3aba6d5e0aa042020435b7617086a541938e24c45",
"SandboxKey": "/var/run/docker/netns/b17fe84301aa",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "34986"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "34987"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "34990"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "34988"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "34989"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"force-systemd-flag-389625": {
"IPAMConfig": {
"IPv4Address": "192.168.76.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "a6:4e:b7:f0:e8:e9",
"DriverOpts": null,
"GwPriority": 0,
"NetworkID": "05b624cbaecbfb4107db7d6109f5dec1fb867b1cd7ee4eb16a9bf04ca55ff1d2",
"EndpointID": "3e47fb6f72bf1949091c2d646103f916b9d921452b0a2a0af24d1f8b7f86d253",
"Gateway": "192.168.76.1",
"IPAddress": "192.168.76.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"force-systemd-flag-389625",
"c756577a6f6a"
]
}
}
}
}
]
-- /stdout --
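Individual fields can be pulled out of the inspect document above with docker inspect's Go-template flag instead of dumping the whole JSON. A short sketch (illustrative; the container name comes from the output above):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Extract just the container state and the node IP from `docker inspect`.
	format := "{{.State.Status}} {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}"
	out, err := exec.Command("docker", "inspect", "-f", format, "force-systemd-flag-389625").Output()
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	fmt.Println(strings.TrimSpace(string(out))) // e.g. "running 192.168.76.2"
}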
helpers_test.go:248: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p force-systemd-flag-389625 -n force-systemd-flag-389625
helpers_test.go:248: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.Host}} -p force-systemd-flag-389625 -n force-systemd-flag-389625: exit status 6 (477.481468ms)
-- stdout --
Running
WARNING: Your kubectl is pointing to stale minikube-vm.
To fix the kubectl context, run `minikube update-context`
-- /stdout --
** stderr **
E0110 02:39:56.571329 2457811 status.go:458] kubeconfig endpoint: get endpoint: "force-systemd-flag-389625" does not appear in /home/jenkins/minikube-integration/22414-2221005/kubeconfig
** /stderr **
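The exit status 6 above comes from minikube noticing that the profile has no endpoint entry in the kubeconfig file. A rough stand-alone check along the same lines (simplified: it only searches the file textually instead of parsing it; the profile name and default path are assumptions based on the log):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	path := os.Getenv("KUBECONFIG")
	if path == "" {
		path = os.Getenv("HOME") + "/.kube/config"
	}
	data, err := os.ReadFile(path)
	if err != nil {
		fmt.Println("cannot read kubeconfig:", err)
		return
	}
	profile := "force-systemd-flag-389625"
	if !strings.Contains(string(data), profile) {
		// This is the condition status.go:458 reports above; `minikube update-context`
		// (or a successful start) would add the missing cluster/context entries.
		fmt.Printf("%q does not appear in %s\n", profile, path)
		return
	}
	fmt.Printf("%q found in %s\n", profile, path)
}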
helpers_test.go:248: status error: exit status 6 (may be ok)
helpers_test.go:253: <<< TestForceSystemdFlag FAILED: start of post-mortem logs <<<
helpers_test.go:254: ======> post-mortem[TestForceSystemdFlag]: minikube logs <======
helpers_test.go:256: (dbg) Run: out/minikube-linux-arm64 -p force-systemd-flag-389625 logs -n 25
helpers_test.go:261: TestForceSystemdFlag logs:
-- stdout --
==> Audit <==
┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────────┬─────────┬─────────┬─────────────────────┬─────────────────────┐
│ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │
├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────────┼─────────┼─────────┼─────────────────────┼─────────────────────┤
│ ssh │ -p cilium-818554 sudo systemctl status docker --all --full --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl cat docker --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo cat /etc/docker/daemon.json │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo docker system info │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl status cri-docker --all --full --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl cat cri-docker --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo cat /etc/systemd/system/cri-docker.service.d/10-cni.conf │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo cat /usr/lib/systemd/system/cri-docker.service │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo cri-dockerd --version │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl status containerd --all --full --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl cat containerd --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo cat /lib/systemd/system/containerd.service │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo cat /etc/containerd/config.toml │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo containerd config dump │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl status crio --all --full --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo systemctl cat crio --no-pager │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ delete │ -p offline-docker-420658 │ offline-docker-420658 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ 10 Jan 26 02:31 UTC │
│ ssh │ -p cilium-818554 sudo find /etc/crio -type f -exec sh -c 'echo {}; cat {}' \; │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ -p cilium-818554 sudo crio config │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ delete │ -p cilium-818554 │ cilium-818554 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ 10 Jan 26 02:31 UTC │
│ start │ -p force-systemd-env-405089 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker │ force-systemd-env-405089 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ start │ -p force-systemd-flag-389625 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=docker │ force-systemd-flag-389625 │ jenkins │ v1.37.0 │ 10 Jan 26 02:31 UTC │ │
│ ssh │ force-systemd-env-405089 ssh docker info --format {{.CgroupDriver}} │ force-systemd-env-405089 │ jenkins │ v1.37.0 │ 10 Jan 26 02:39 UTC │ 10 Jan 26 02:39 UTC │
│ ssh │ force-systemd-flag-389625 ssh docker info --format {{.CgroupDriver}} │ force-systemd-flag-389625 │ jenkins │ v1.37.0 │ 10 Jan 26 02:39 UTC │ 10 Jan 26 02:39 UTC │
│ delete │ -p force-systemd-env-405089 │ force-systemd-env-405089 │ jenkins │ v1.37.0 │ 10 Jan 26 02:39 UTC │ │
└─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────────┴─────────┴─────────┴─────────────────────┴─────────────────────┘
==> Last Start <==
Log file created at: 2026/01/10 02:31:31
Running on machine: ip-172-31-29-130
Binary: Built with gc go1.25.5 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0110 02:31:31.403273 2444942 out.go:360] Setting OutFile to fd 1 ...
I0110 02:31:31.403569 2444942 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0110 02:31:31.403599 2444942 out.go:374] Setting ErrFile to fd 2...
I0110 02:31:31.403618 2444942 out.go:408] TERM=,COLORTERM=, which probably does not support color
I0110 02:31:31.403919 2444942 root.go:338] Updating PATH: /home/jenkins/minikube-integration/22414-2221005/.minikube/bin
I0110 02:31:31.404424 2444942 out.go:368] Setting JSON to false
I0110 02:31:31.405395 2444942 start.go:133] hostinfo: {"hostname":"ip-172-31-29-130","uptime":36841,"bootTime":1767975451,"procs":160,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1084-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I0110 02:31:31.405497 2444942 start.go:143] virtualization:
I0110 02:31:31.408819 2444942 out.go:179] * [force-systemd-flag-389625] minikube v1.37.0 on Ubuntu 20.04 (arm64)
I0110 02:31:31.412885 2444942 out.go:179] - MINIKUBE_LOCATION=22414
I0110 02:31:31.412964 2444942 notify.go:221] Checking for updates...
I0110 02:31:31.425190 2444942 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0110 02:31:31.428163 2444942 out.go:179] - KUBECONFIG=/home/jenkins/minikube-integration/22414-2221005/kubeconfig
I0110 02:31:31.431030 2444942 out.go:179] - MINIKUBE_HOME=/home/jenkins/minikube-integration/22414-2221005/.minikube
I0110 02:31:31.433941 2444942 out.go:179] - MINIKUBE_BIN=out/minikube-linux-arm64
I0110 02:31:31.436853 2444942 out.go:179] - MINIKUBE_FORCE_SYSTEMD=
I0110 02:31:31.440329 2444942 config.go:182] Loaded profile config "force-systemd-env-405089": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I0110 02:31:31.440445 2444942 driver.go:422] Setting default libvirt URI to qemu:///system
I0110 02:31:31.473277 2444942 docker.go:124] docker version: linux-28.1.1:Docker Engine - Community
I0110 02:31:31.473389 2444942 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0110 02:31:31.569510 2444942 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:35 OomKillDisable:true NGoroutines:59 SystemTime:2026-01-10 02:31:31.559356986 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0110 02:31:31.569623 2444942 docker.go:319] overlay module found
I0110 02:31:31.577195 2444942 out.go:179] * Using the docker driver based on user configuration
I0110 02:31:31.580216 2444942 start.go:309] selected driver: docker
I0110 02:31:31.580239 2444942 start.go:928] validating driver "docker" against <nil>
I0110 02:31:31.580254 2444942 start.go:939] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0110 02:31:31.580972 2444942 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0110 02:31:31.685470 2444942 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:35 OomKillDisable:true NGoroutines:59 SystemTime:2026-01-10 02:31:31.673022095 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1084-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214835200 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:28.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:05044ec0a9a75232cad458027ca83437aae3f4da Expected:} RuncCommit:{ID:v1.2.5-0-g59923ef Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.23.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.35.1]] Warnings:<nil>}}
I0110 02:31:31.685622 2444942 start_flags.go:333] no existing cluster config was found, will generate one from the flags
I0110 02:31:31.685842 2444942 start_flags.go:1001] Wait components to verify : map[apiserver:true system_pods:true]
I0110 02:31:31.695072 2444942 out.go:179] * Using Docker driver with root privileges
I0110 02:31:31.704472 2444942 cni.go:84] Creating CNI manager for ""
I0110 02:31:31.704566 2444942 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0110 02:31:31.704582 2444942 start_flags.go:342] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0110 02:31:31.704671 2444942 start.go:353] cluster config:
{Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I0110 02:31:31.716615 2444942 out.go:179] * Starting "force-systemd-flag-389625" primary control-plane node in "force-systemd-flag-389625" cluster
I0110 02:31:31.725232 2444942 cache.go:134] Beginning downloading kic base image for docker with docker
I0110 02:31:31.731542 2444942 out.go:179] * Pulling base image v0.0.48-1767944074-22401 ...
I0110 02:31:31.734740 2444942 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:31.734792 2444942 preload.go:203] Found local preload: /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4
I0110 02:31:31.734803 2444942 cache.go:65] Caching tarball of preloaded images
I0110 02:31:31.734922 2444942 preload.go:251] Found /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0110 02:31:31.734933 2444942 cache.go:68] Finished verifying existence of preloaded tar for v1.35.0 on docker
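The preload lines above show the cache check: the lz4 tarball of preloaded images is looked up under the minikube home directory and, when it already exists, the download is skipped. A minimal sketch of that lookup, with the directory layout and filename pattern copied from this run (assumptions for illustration, not minikube's actual cache code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// preloadPath builds the expected tarball location for a given Kubernetes version,
// mirroring the path that appears in the log above.
func preloadPath(minikubeHome, k8sVersion string) string {
	return filepath.Join(minikubeHome, "cache", "preloaded-tarball",
		fmt.Sprintf("preloaded-images-k8s-v18-%s-docker-overlay2-arm64.tar.lz4", k8sVersion))
}

func main() {
	p := preloadPath("/home/jenkins/minikube-integration/22414-2221005/.minikube", "v1.35.0")
	if _, err := os.Stat(p); err == nil {
		fmt.Println("Found local preload, skipping download:", p)
	} else {
		fmt.Println("Preload missing, would download:", p)
	}
}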
I0110 02:31:31.735052 2444942 profile.go:143] Saving config to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/config.json ...
I0110 02:31:31.735070 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/config.json: {Name:mkf231dfddb62b8df14c42136e70d1c72c396e08 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:31.735223 2444942 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 in local docker daemon
I0110 02:31:31.768290 2444942 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 in local docker daemon, skipping pull
I0110 02:31:31.768314 2444942 cache.go:158] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 exists in daemon, skipping load
I0110 02:31:31.768329 2444942 cache.go:243] Successfully downloaded all kic artifacts
I0110 02:31:31.768360 2444942 start.go:360] acquireMachinesLock for force-systemd-flag-389625: {Name:mkda4641748142b11aadec6867161d872c9610a8 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0110 02:31:31.768468 2444942 start.go:364] duration metric: took 88.236µs to acquireMachinesLock for "force-systemd-flag-389625"
I0110 02:31:31.768503 2444942 start.go:93] Provisioning new machine with config: &{Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} &{Name: IP: Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0110 02:31:31.768575 2444942 start.go:125] createHost starting for "" (driver="docker")
I0110 02:31:29.585409 2444124 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0110 02:31:29.585634 2444124 start.go:159] libmachine.API.Create for "force-systemd-env-405089" (driver="docker")
I0110 02:31:29.585669 2444124 client.go:173] LocalClient.Create starting
I0110 02:31:29.585728 2444124 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem
I0110 02:31:29.585764 2444124 main.go:144] libmachine: Decoding PEM data...
I0110 02:31:29.585784 2444124 main.go:144] libmachine: Parsing certificate...
I0110 02:31:29.585842 2444124 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem
I0110 02:31:29.585863 2444124 main.go:144] libmachine: Decoding PEM data...
I0110 02:31:29.585883 2444124 main.go:144] libmachine: Parsing certificate...
I0110 02:31:29.586231 2444124 cli_runner.go:164] Run: docker network inspect force-systemd-env-405089 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0110 02:31:29.610121 2444124 cli_runner.go:211] docker network inspect force-systemd-env-405089 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0110 02:31:29.610191 2444124 network_create.go:284] running [docker network inspect force-systemd-env-405089] to gather additional debugging logs...
I0110 02:31:29.610221 2444124 cli_runner.go:164] Run: docker network inspect force-systemd-env-405089
W0110 02:31:29.644159 2444124 cli_runner.go:211] docker network inspect force-systemd-env-405089 returned with exit code 1
I0110 02:31:29.644186 2444124 network_create.go:287] error running [docker network inspect force-systemd-env-405089]: docker network inspect force-systemd-env-405089: exit status 1
stdout:
[]
stderr:
Error response from daemon: network force-systemd-env-405089 not found
I0110 02:31:29.644198 2444124 network_create.go:289] output of [docker network inspect force-systemd-env-405089]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network force-systemd-env-405089 not found
** /stderr **
I0110 02:31:29.644302 2444124 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0110 02:31:29.676112 2444124 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-eeafa1ec40c7 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:52:dd:85:54:7e:14} reservation:<nil>}
I0110 02:31:29.676635 2444124 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-0306382db894 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:9a:0a:12:a6:69:af} reservation:<nil>}
I0110 02:31:29.676947 2444124 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-42f1ed7cacde IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:66:5d:25:88:ef:ef} reservation:<nil>}
I0110 02:31:29.677429 2444124 network.go:211] skipping subnet 192.168.76.0/24 that is taken: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName:br-d6c9be719dc1 IfaceIPv4:192.168.76.1 IfaceMTU:1500 IfaceMAC:46:8d:64:6b:58:be} reservation:<nil>}
I0110 02:31:29.678964 2444124 network.go:206] using free private subnet 192.168.85.0/24: &{IP:192.168.85.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.85.0/24 Gateway:192.168.85.1 ClientMin:192.168.85.2 ClientMax:192.168.85.254 Broadcast:192.168.85.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001a5e090}
I0110 02:31:29.679020 2444124 network_create.go:124] attempt to create docker network force-systemd-env-405089 192.168.85.0/24 with gateway 192.168.85.1 and MTU of 1500 ...
I0110 02:31:29.679130 2444124 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.85.0/24 --gateway=192.168.85.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=force-systemd-env-405089 force-systemd-env-405089
I0110 02:31:29.775823 2444124 network_create.go:108] docker network force-systemd-env-405089 192.168.85.0/24 created
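The subnet lines above walk the candidate private /24 ranges in order, skip the ones already backing existing bridges, and create the cluster network on the first free one (192.168.85.0/24 in this run). A minimal sketch of that selection pattern, with names and data invented to mirror this log rather than taken from minikube's network code:

package main

import "fmt"

// firstFreeSubnet returns the first candidate CIDR that is not already in use,
// echoing the "skipping subnet ... that is taken" / "using free private subnet"
// messages seen in the log.
func firstFreeSubnet(candidates []string, taken map[string]bool) (string, bool) {
	for _, cidr := range candidates {
		if taken[cidr] {
			fmt.Printf("skipping subnet %s that is taken\n", cidr)
			continue
		}
		return cidr, true
	}
	return "", false
}

func main() {
	// Candidates mirror the order seen above; "taken" mirrors the bridge networks
	// that already existed when force-systemd-env-405089 was created.
	candidates := []string{
		"192.168.49.0/24", "192.168.58.0/24", "192.168.67.0/24",
		"192.168.76.0/24", "192.168.85.0/24",
	}
	taken := map[string]bool{
		"192.168.49.0/24": true, "192.168.58.0/24": true,
		"192.168.67.0/24": true, "192.168.76.0/24": true,
	}
	if cidr, ok := firstFreeSubnet(candidates, taken); ok {
		fmt.Println("using free private subnet", cidr) // 192.168.85.0/24 here
	}
}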
I0110 02:31:29.775860 2444124 kic.go:121] calculated static IP "192.168.85.2" for the "force-systemd-env-405089" container
I0110 02:31:29.775934 2444124 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0110 02:31:29.794158 2444124 cli_runner.go:164] Run: docker volume create force-systemd-env-405089 --label name.minikube.sigs.k8s.io=force-systemd-env-405089 --label created_by.minikube.sigs.k8s.io=true
I0110 02:31:29.812548 2444124 oci.go:103] Successfully created a docker volume force-systemd-env-405089
I0110 02:31:29.812646 2444124 cli_runner.go:164] Run: docker run --rm --name force-systemd-env-405089-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-env-405089 --entrypoint /usr/bin/test -v force-systemd-env-405089:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -d /var/lib
I0110 02:31:30.375187 2444124 oci.go:107] Successfully prepared a docker volume force-systemd-env-405089
I0110 02:31:30.375254 2444124 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:30.375264 2444124 kic.go:194] Starting extracting preloaded images to volume ...
I0110 02:31:30.375340 2444124 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v force-systemd-env-405089:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -I lz4 -xf /preloaded.tar -C /extractDir
I0110 02:31:33.218633 2444124 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v force-systemd-env-405089:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -I lz4 -xf /preloaded.tar -C /extractDir: (2.843257443s)
I0110 02:31:33.218668 2444124 kic.go:203] duration metric: took 2.843399774s to extract preloaded images to volume ...
W0110 02:31:33.218794 2444124 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0110 02:31:33.218913 2444124 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0110 02:31:33.308593 2444124 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname force-systemd-env-405089 --name force-systemd-env-405089 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-env-405089 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=force-systemd-env-405089 --network force-systemd-env-405089 --ip 192.168.85.2 --volume force-systemd-env-405089:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773
I0110 02:31:33.809884 2444124 cli_runner.go:164] Run: docker container inspect force-systemd-env-405089 --format={{.State.Running}}
I0110 02:31:33.863227 2444124 cli_runner.go:164] Run: docker container inspect force-systemd-env-405089 --format={{.State.Status}}
I0110 02:31:33.899538 2444124 cli_runner.go:164] Run: docker exec force-systemd-env-405089 stat /var/lib/dpkg/alternatives/iptables
I0110 02:31:33.973139 2444124 oci.go:144] the created container "force-systemd-env-405089" has a running status.
I0110 02:31:33.973175 2444124 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa...
I0110 02:31:34.190131 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0110 02:31:34.190189 2444124 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0110 02:31:31.770687 2444942 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ...
I0110 02:31:31.770958 2444942 start.go:159] libmachine.API.Create for "force-systemd-flag-389625" (driver="docker")
I0110 02:31:31.770996 2444942 client.go:173] LocalClient.Create starting
I0110 02:31:31.771061 2444942 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem
I0110 02:31:31.771107 2444942 main.go:144] libmachine: Decoding PEM data...
I0110 02:31:31.771131 2444942 main.go:144] libmachine: Parsing certificate...
I0110 02:31:31.771194 2444942 main.go:144] libmachine: Reading certificate data from /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem
I0110 02:31:31.771216 2444942 main.go:144] libmachine: Decoding PEM data...
I0110 02:31:31.771231 2444942 main.go:144] libmachine: Parsing certificate...
I0110 02:31:31.771599 2444942 cli_runner.go:164] Run: docker network inspect force-systemd-flag-389625 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0110 02:31:31.789231 2444942 cli_runner.go:211] docker network inspect force-systemd-flag-389625 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0110 02:31:31.789311 2444942 network_create.go:284] running [docker network inspect force-systemd-flag-389625] to gather additional debugging logs...
I0110 02:31:31.789330 2444942 cli_runner.go:164] Run: docker network inspect force-systemd-flag-389625
W0110 02:31:31.804491 2444942 cli_runner.go:211] docker network inspect force-systemd-flag-389625 returned with exit code 1
I0110 02:31:31.804519 2444942 network_create.go:287] error running [docker network inspect force-systemd-flag-389625]: docker network inspect force-systemd-flag-389625: exit status 1
stdout:
[]
stderr:
Error response from daemon: network force-systemd-flag-389625 not found
I0110 02:31:31.804531 2444942 network_create.go:289] output of [docker network inspect force-systemd-flag-389625]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network force-systemd-flag-389625 not found
** /stderr **
I0110 02:31:31.804633 2444942 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0110 02:31:31.821447 2444942 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-eeafa1ec40c7 IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:52:dd:85:54:7e:14} reservation:<nil>}
I0110 02:31:31.821788 2444942 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-0306382db894 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:9a:0a:12:a6:69:af} reservation:<nil>}
I0110 02:31:31.822120 2444942 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-42f1ed7cacde IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:66:5d:25:88:ef:ef} reservation:<nil>}
I0110 02:31:31.822532 2444942 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001977430}
I0110 02:31:31.822549 2444942 network_create.go:124] attempt to create docker network force-systemd-flag-389625 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
I0110 02:31:31.822614 2444942 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=force-systemd-flag-389625 force-systemd-flag-389625
I0110 02:31:31.879729 2444942 network_create.go:108] docker network force-systemd-flag-389625 192.168.76.0/24 created
I0110 02:31:31.879758 2444942 kic.go:121] calculated static IP "192.168.76.2" for the "force-systemd-flag-389625" container
I0110 02:31:31.879830 2444942 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0110 02:31:31.907715 2444942 cli_runner.go:164] Run: docker volume create force-systemd-flag-389625 --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --label created_by.minikube.sigs.k8s.io=true
I0110 02:31:31.939677 2444942 oci.go:103] Successfully created a docker volume force-systemd-flag-389625
I0110 02:31:31.939777 2444942 cli_runner.go:164] Run: docker run --rm --name force-systemd-flag-389625-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --entrypoint /usr/bin/test -v force-systemd-flag-389625:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -d /var/lib
I0110 02:31:33.763406 2444942 cli_runner.go:217] Completed: docker run --rm --name force-systemd-flag-389625-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --entrypoint /usr/bin/test -v force-systemd-flag-389625:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -d /var/lib: (1.823586252s)
I0110 02:31:33.763439 2444942 oci.go:107] Successfully prepared a docker volume force-systemd-flag-389625
I0110 02:31:33.763488 2444942 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:33.763505 2444942 kic.go:194] Starting extracting preloaded images to volume ...
I0110 02:31:33.763585 2444942 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v force-systemd-flag-389625:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -I lz4 -xf /preloaded.tar -C /extractDir
I0110 02:31:34.225021 2444124 cli_runner.go:164] Run: docker container inspect force-systemd-env-405089 --format={{.State.Status}}
I0110 02:31:34.249136 2444124 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0110 02:31:34.249157 2444124 kic_runner.go:114] Args: [docker exec --privileged force-systemd-env-405089 chown docker:docker /home/docker/.ssh/authorized_keys]
I0110 02:31:34.332008 2444124 cli_runner.go:164] Run: docker container inspect force-systemd-env-405089 --format={{.State.Status}}
I0110 02:31:34.365192 2444124 machine.go:94] provisionDockerMachine start ...
I0110 02:31:34.365297 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:34.393974 2444124 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:34.394308 2444124 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34981 <nil> <nil>}
I0110 02:31:34.394318 2444124 main.go:144] libmachine: About to run SSH command:
hostname
I0110 02:31:34.394993 2444124 main.go:144] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:33838->127.0.0.1:34981: read: connection reset by peer
I0110 02:31:37.568807 2444124 main.go:144] libmachine: SSH cmd err, output: <nil>: force-systemd-env-405089
I0110 02:31:37.568832 2444124 ubuntu.go:182] provisioning hostname "force-systemd-env-405089"
I0110 02:31:37.568912 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:37.588249 2444124 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:37.588558 2444124 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34981 <nil> <nil>}
I0110 02:31:37.588583 2444124 main.go:144] libmachine: About to run SSH command:
sudo hostname force-systemd-env-405089 && echo "force-systemd-env-405089" | sudo tee /etc/hostname
I0110 02:31:37.746546 2444124 main.go:144] libmachine: SSH cmd err, output: <nil>: force-systemd-env-405089
I0110 02:31:37.746628 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:37.767459 2444124 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:37.767772 2444124 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34981 <nil> <nil>}
I0110 02:31:37.767794 2444124 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sforce-systemd-env-405089' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 force-systemd-env-405089/g' /etc/hosts;
else
echo '127.0.1.1 force-systemd-env-405089' | sudo tee -a /etc/hosts;
fi
fi
I0110 02:31:37.917803 2444124 main.go:144] libmachine: SSH cmd err, output: <nil>:
I0110 02:31:37.917839 2444124 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22414-2221005/.minikube CaCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22414-2221005/.minikube}
I0110 02:31:37.917867 2444124 ubuntu.go:190] setting up certificates
I0110 02:31:37.917878 2444124 provision.go:84] configureAuth start
I0110 02:31:37.917939 2444124 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-env-405089
I0110 02:31:37.936050 2444124 provision.go:143] copyHostCerts
I0110 02:31:37.936093 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem
I0110 02:31:37.936126 2444124 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem, removing ...
I0110 02:31:37.936143 2444124 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem
I0110 02:31:37.936221 2444124 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem (1082 bytes)
I0110 02:31:37.936318 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem
I0110 02:31:37.936341 2444124 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem, removing ...
I0110 02:31:37.936350 2444124 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem
I0110 02:31:37.936386 2444124 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem (1123 bytes)
I0110 02:31:37.936442 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem
I0110 02:31:37.936463 2444124 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem, removing ...
I0110 02:31:37.936471 2444124 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem
I0110 02:31:37.936496 2444124 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem (1679 bytes)
I0110 02:31:37.936548 2444124 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem org=jenkins.force-systemd-env-405089 san=[127.0.0.1 192.168.85.2 force-systemd-env-405089 localhost minikube]
I0110 02:31:38.258206 2444124 provision.go:177] copyRemoteCerts
I0110 02:31:38.258288 2444124 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0110 02:31:38.258339 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:38.276203 2444124 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34981 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa Username:docker}
I0110 02:31:38.381656 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0110 02:31:38.381728 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0110 02:31:38.400027 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem -> /etc/docker/server.pem
I0110 02:31:38.400088 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem --> /etc/docker/server.pem (1237 bytes)
I0110 02:31:38.417556 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0110 02:31:38.417620 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0110 02:31:38.438553 2444124 provision.go:87] duration metric: took 520.648879ms to configureAuth
I0110 02:31:38.438640 2444124 ubuntu.go:206] setting minikube options for container-runtime
I0110 02:31:38.438850 2444124 config.go:182] Loaded profile config "force-systemd-env-405089": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I0110 02:31:38.438923 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:38.456723 2444124 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:38.457166 2444124 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34981 <nil> <nil>}
I0110 02:31:38.457186 2444124 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0110 02:31:38.623956 2444124 main.go:144] libmachine: SSH cmd err, output: <nil>: overlay
I0110 02:31:38.623983 2444124 ubuntu.go:71] root file system type: overlay
I0110 02:31:38.624112 2444124 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0110 02:31:38.624190 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:38.651894 2444124 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:38.652212 2444124 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34981 <nil> <nil>}
I0110 02:31:38.652296 2444124 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0110 02:31:38.832340 2444124 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0110 02:31:38.832516 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:38.851001 2444124 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:38.851318 2444124 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34981 <nil> <nil>}
I0110 02:31:38.851335 2444124 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
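The command above installs the rewritten docker.service and restarts dockerd only when the unit actually changed; the force-systemd tests then confirm the effect via the "ssh docker info --format {{.CgroupDriver}}" entries recorded in the command table earlier, expecting the driver to be systemd. A minimal sketch of that kind of check, reusing the binary path and profile name from this run (assumptions for illustration, not the test suite's actual helper code):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Ask the node's docker daemon which cgroup driver it ended up with.
	out, err := exec.Command("out/minikube-linux-arm64", "-p", "force-systemd-env-405089",
		"ssh", "docker info --format {{.CgroupDriver}}").CombinedOutput()
	if err != nil {
		fmt.Println("minikube ssh failed:", err)
		return
	}
	driver := strings.TrimSpace(string(out))
	if driver != "systemd" {
		fmt.Printf("expected cgroup driver systemd, got %q\n", driver)
		return
	}
	fmt.Println("cgroup driver is systemd")
}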
I0110 02:31:36.676943 2444942 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/22414-2221005/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.35.0-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v force-systemd-flag-389625:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 -I lz4 -xf /preloaded.tar -C /extractDir: (2.913316987s)
I0110 02:31:36.676976 2444942 kic.go:203] duration metric: took 2.913468033s to extract preloaded images to volume ...
W0110 02:31:36.677157 2444942 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0110 02:31:36.677267 2444942 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0110 02:31:36.733133 2444942 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname force-systemd-flag-389625 --name force-systemd-flag-389625 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=force-systemd-flag-389625 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=force-systemd-flag-389625 --network force-systemd-flag-389625 --ip 192.168.76.2 --volume force-systemd-flag-389625:/var --security-opt apparmor=unconfined --memory=3072mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773
I0110 02:31:37.020083 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Running}}
I0110 02:31:37.049554 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Status}}
I0110 02:31:37.073410 2444942 cli_runner.go:164] Run: docker exec force-systemd-flag-389625 stat /var/lib/dpkg/alternatives/iptables
I0110 02:31:37.123872 2444942 oci.go:144] the created container "force-systemd-flag-389625" has a running status.
I0110 02:31:37.123914 2444942 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa...
I0110 02:31:37.219546 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa.pub -> /home/docker/.ssh/authorized_keys
I0110 02:31:37.219643 2444942 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0110 02:31:37.246178 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Status}}
I0110 02:31:37.265663 2444942 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0110 02:31:37.265687 2444942 kic_runner.go:114] Args: [docker exec --privileged force-systemd-flag-389625 chown docker:docker /home/docker/.ssh/authorized_keys]
I0110 02:31:37.315490 2444942 cli_runner.go:164] Run: docker container inspect force-systemd-flag-389625 --format={{.State.Status}}
I0110 02:31:37.344025 2444942 machine.go:94] provisionDockerMachine start ...
I0110 02:31:37.344113 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:37.365329 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:37.366213 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:37.366237 2444942 main.go:144] libmachine: About to run SSH command:
hostname
I0110 02:31:37.366917 2444942 main.go:144] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I0110 02:31:40.525424 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: force-systemd-flag-389625
I0110 02:31:40.525452 2444942 ubuntu.go:182] provisioning hostname "force-systemd-flag-389625"
I0110 02:31:40.525529 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:40.550883 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:40.551514 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:40.551534 2444942 main.go:144] libmachine: About to run SSH command:
sudo hostname force-systemd-flag-389625 && echo "force-systemd-flag-389625" | sudo tee /etc/hostname
I0110 02:31:40.741599 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: force-systemd-flag-389625
I0110 02:31:40.741787 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:40.769891 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:40.770349 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:40.770376 2444942 main.go:144] libmachine: About to run SSH command:
if ! grep -xq '.*\sforce-systemd-flag-389625' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 force-systemd-flag-389625/g' /etc/hosts;
else
echo '127.0.1.1 force-systemd-flag-389625' | sudo tee -a /etc/hosts;
fi
fi
I0110 02:31:40.933268 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>:
I0110 02:31:40.933300 2444942 ubuntu.go:188] set auth options {CertDir:/home/jenkins/minikube-integration/22414-2221005/.minikube CaCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/22414-2221005/.minikube}
I0110 02:31:40.933334 2444942 ubuntu.go:190] setting up certificates
I0110 02:31:40.933344 2444942 provision.go:84] configureAuth start
I0110 02:31:40.933425 2444942 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-flag-389625
I0110 02:31:40.954041 2444942 provision.go:143] copyHostCerts
I0110 02:31:40.954074 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem
I0110 02:31:40.954109 2444942 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem, removing ...
I0110 02:31:40.954115 2444942 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem
I0110 02:31:40.954187 2444942 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.pem (1082 bytes)
I0110 02:31:40.954287 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem
I0110 02:31:40.954306 2444942 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem, removing ...
I0110 02:31:40.954311 2444942 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem
I0110 02:31:40.954348 2444942 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/cert.pem (1123 bytes)
I0110 02:31:40.954426 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem
I0110 02:31:40.954443 2444942 exec_runner.go:144] found /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem, removing ...
I0110 02:31:40.954447 2444942 exec_runner.go:203] rm: /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem
I0110 02:31:40.954472 2444942 exec_runner.go:151] cp: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/22414-2221005/.minikube/key.pem (1679 bytes)
I0110 02:31:40.954527 2444942 provision.go:117] generating server cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem org=jenkins.force-systemd-flag-389625 san=[127.0.0.1 192.168.76.2 force-systemd-flag-389625 localhost minikube]
I0110 02:31:41.170708 2444942 provision.go:177] copyRemoteCerts
I0110 02:31:41.170784 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0110 02:31:41.170832 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.191286 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:41.302379 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem -> /etc/docker/server.pem
I0110 02:31:41.302491 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server.pem --> /etc/docker/server.pem (1241 bytes)
I0110 02:31:41.325187 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I0110 02:31:41.325316 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0110 02:31:41.349568 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I0110 02:31:41.349680 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0110 02:31:41.371181 2444942 provision.go:87] duration metric: took 437.80859ms to configureAuth
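For reference, the configureAuth step above copies the host CA material into place and then generates a server certificate whose SANs come straight from the provision line (127.0.0.1, 192.168.76.2, force-systemd-flag-389625, localhost, minikube). A minimal Go sketch of that certificate generation follows; it is self-signed for brevity (the real flow signs with the shared ca.pem/ca-key.pem), and the output filename is only illustrative.

// sketch: generate a server cert carrying the SAN set shown in the provision step above.
package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/pem"
    "math/big"
    "net"
    "os"
    "time"
)

func main() {
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }
    tmpl := &x509.Certificate{
        SerialNumber: big.NewInt(1),
        Subject:      pkix.Name{Organization: []string{"jenkins.force-systemd-flag-389625"}},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().Add(365 * 24 * time.Hour),
        KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
        ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
        DNSNames:     []string{"force-systemd-flag-389625", "localhost", "minikube"},
        IPAddresses:  []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("192.168.76.2")},
    }
    // self-signed here; the log signs with the shared minikube CA instead
    der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    if err != nil {
        panic(err)
    }
    _ = os.WriteFile("server.pem", pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}), 0o644)
}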
I0110 02:31:41.371265 2444942 ubuntu.go:206] setting minikube options for container-runtime
I0110 02:31:41.371507 2444942 config.go:182] Loaded profile config "force-systemd-flag-389625": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.35.0
I0110 02:31:41.371603 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.397226 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:41.397537 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:41.397547 2444942 main.go:144] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
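That SSH command is how the provisioner learns the root filesystem type (the reply, shown further down at 02:31:41.564217, is "overlay"). A tiny Go wrapper around the same probe, assuming a local sh is available:

// sketch: run the same `df --output=fstype / | tail -n 1` probe and print the result.
package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    out, err := exec.Command("sh", "-c", "df --output=fstype / | tail -n 1").Output()
    if err != nil {
        panic(err)
    }
    fmt.Println("root fs type:", strings.TrimSpace(string(out))) // e.g. "overlay" inside the kicbase container
}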
I0110 02:31:39.848898 2444124 main.go:144] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-12-12 14:49:02.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2026-01-10 02:31:38.826649162 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0110 02:31:39.848925 2444124 machine.go:97] duration metric: took 5.483705976s to provisionDockerMachine
I0110 02:31:39.848938 2444124 client.go:176] duration metric: took 10.263257466s to LocalClient.Create
I0110 02:31:39.848983 2444124 start.go:167] duration metric: took 10.263350347s to libmachine.API.Create "force-systemd-env-405089"
I0110 02:31:39.848999 2444124 start.go:293] postStartSetup for "force-systemd-env-405089" (driver="docker")
I0110 02:31:39.849010 2444124 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0110 02:31:39.849143 2444124 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0110 02:31:39.849190 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:39.867772 2444124 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34981 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa Username:docker}
I0110 02:31:39.969324 2444124 ssh_runner.go:195] Run: cat /etc/os-release
I0110 02:31:39.972690 2444124 main.go:144] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0110 02:31:39.972719 2444124 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I0110 02:31:39.972731 2444124 filesync.go:126] Scanning /home/jenkins/minikube-integration/22414-2221005/.minikube/addons for local assets ...
I0110 02:31:39.972810 2444124 filesync.go:126] Scanning /home/jenkins/minikube-integration/22414-2221005/.minikube/files for local assets ...
I0110 02:31:39.972927 2444124 filesync.go:149] local asset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> 22228772.pem in /etc/ssl/certs
I0110 02:31:39.972937 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> /etc/ssl/certs/22228772.pem
I0110 02:31:39.973066 2444124 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0110 02:31:39.981882 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem --> /etc/ssl/certs/22228772.pem (1708 bytes)
I0110 02:31:40.000017 2444124 start.go:296] duration metric: took 151.001946ms for postStartSetup
I0110 02:31:40.000404 2444124 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-env-405089
I0110 02:31:40.038533 2444124 profile.go:143] Saving config to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/config.json ...
I0110 02:31:40.038894 2444124 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0110 02:31:40.038954 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:40.057310 2444124 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34981 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa Username:docker}
I0110 02:31:40.158291 2444124 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0110 02:31:40.163210 2444124 start.go:128] duration metric: took 10.581191023s to createHost
I0110 02:31:40.163237 2444124 start.go:83] releasing machines lock for "force-systemd-env-405089", held for 10.581321237s
I0110 02:31:40.163309 2444124 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-env-405089
I0110 02:31:40.180948 2444124 ssh_runner.go:195] Run: cat /version.json
I0110 02:31:40.181013 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:40.181219 2444124 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0110 02:31:40.181281 2444124 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-env-405089
I0110 02:31:40.201769 2444124 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34981 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa Username:docker}
I0110 02:31:40.209162 2444124 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34981 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-env-405089/id_rsa Username:docker}
I0110 02:31:40.305222 2444124 ssh_runner.go:195] Run: systemctl --version
I0110 02:31:40.413487 2444124 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W0110 02:31:40.417954 2444124 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0110 02:31:40.418043 2444124 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0110 02:31:40.452568 2444124 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I0110 02:31:40.452596 2444124 start.go:496] detecting cgroup driver to use...
I0110 02:31:40.452613 2444124 start.go:500] using "systemd" cgroup driver as enforced via flags
I0110 02:31:40.452712 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0110 02:31:40.470526 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0110 02:31:40.482803 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0110 02:31:40.492346 2444124 containerd.go:147] configuring containerd to use "systemd" as cgroup driver...
I0110 02:31:40.492457 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0110 02:31:40.502450 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0110 02:31:40.511527 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0110 02:31:40.520445 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0110 02:31:40.540761 2444124 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0110 02:31:40.551850 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0110 02:31:40.563654 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0110 02:31:40.574796 2444124 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0110 02:31:40.585488 2444124 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0110 02:31:40.595355 2444124 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0110 02:31:40.609803 2444124 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:40.744003 2444124 ssh_runner.go:195] Run: sudo systemctl restart containerd
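The sed invocations above rewrite /etc/containerd/config.toml so that SystemdCgroup = true before containerd is restarted. A rough Go equivalent of that single substitution (the path is the one from the log; running it locally needs the same root privileges):

// sketch: flip SystemdCgroup to true, mirroring the sed command at 02:31:40.492457.
package main

import (
    "os"
    "regexp"
)

func main() {
    const path = "/etc/containerd/config.toml" // path taken from the log
    data, err := os.ReadFile(path)
    if err != nil {
        panic(err)
    }
    re := regexp.MustCompile(`(?m)^([ \t]*)SystemdCgroup = .*$`)
    out := re.ReplaceAll(data, []byte("${1}SystemdCgroup = true"))
    if err := os.WriteFile(path, out, 0o644); err != nil {
        panic(err)
    }
}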
I0110 02:31:40.876690 2444124 start.go:496] detecting cgroup driver to use...
I0110 02:31:40.876724 2444124 start.go:500] using "systemd" cgroup driver as enforced via flags
I0110 02:31:40.876779 2444124 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0110 02:31:40.904144 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0110 02:31:40.918264 2444124 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0110 02:31:40.953661 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0110 02:31:40.974405 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0110 02:31:40.989753 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0110 02:31:41.013689 2444124 ssh_runner.go:195] Run: which cri-dockerd
I0110 02:31:41.018251 2444124 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0110 02:31:41.027476 2444124 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0110 02:31:41.042305 2444124 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0110 02:31:41.204191 2444124 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0110 02:31:41.332172 2444124 docker.go:578] configuring docker to use "systemd" as cgroup driver...
I0110 02:31:41.332275 2444124 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
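Here docker.go writes a small /etc/docker/daemon.json (129 bytes) to switch Docker to the systemd cgroup driver. The exact payload is not printed in the log, so the following Go sketch only assumes the minimal exec-opts setting that matches the stated intent:

// sketch: write a minimal daemon.json enforcing the systemd cgroup driver (assumed content).
package main

import (
    "encoding/json"
    "os"
)

func main() {
    cfg := map[string]any{
        "exec-opts": []string{"native.cgroupdriver=systemd"},
    }
    b, _ := json.MarshalIndent(cfg, "", "  ")
    _ = os.WriteFile("daemon.json", append(b, '\n'), 0o644)
}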
I0110 02:31:41.346373 2444124 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0110 02:31:41.360708 2444124 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:41.514222 2444124 ssh_runner.go:195] Run: sudo systemctl restart docker
I0110 02:31:42.063171 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0110 02:31:42.079374 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0110 02:31:42.101258 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0110 02:31:42.121381 2444124 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0110 02:31:42.317076 2444124 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0110 02:31:42.488596 2444124 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:42.651589 2444124 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0110 02:31:42.669531 2444124 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0110 02:31:42.687478 2444124 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:42.822336 2444124 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0110 02:31:42.917629 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0110 02:31:42.937980 2444124 start.go:553] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0110 02:31:42.938103 2444124 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0110 02:31:42.943727 2444124 start.go:574] Will wait 60s for crictl version
I0110 02:31:42.943794 2444124 ssh_runner.go:195] Run: which crictl
I0110 02:31:42.948403 2444124 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I0110 02:31:42.980867 2444124 start.go:590] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 29.1.3
RuntimeApiVersion: v1
I0110 02:31:42.980939 2444124 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0110 02:31:43.004114 2444124 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0110 02:31:43.042145 2444124 out.go:252] * Preparing Kubernetes v1.35.0 on Docker 29.1.3 ...
I0110 02:31:43.042280 2444124 cli_runner.go:164] Run: docker network inspect force-systemd-env-405089 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0110 02:31:43.065360 2444124 ssh_runner.go:195] Run: grep 192.168.85.1 host.minikube.internal$ /etc/hosts
I0110 02:31:43.069214 2444124 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.85.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
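The bash one-liner above strips any stale host.minikube.internal mapping from /etc/hosts and appends the gateway IP. A loose Go rendering of the same idea (it writes /etc/hosts directly, whereas the log stages the file in /tmp and copies it with sudo):

// sketch: replace the host.minikube.internal entry in /etc/hosts with the gateway IP from the log.
package main

import (
    "os"
    "strings"
)

func main() {
    data, err := os.ReadFile("/etc/hosts")
    if err != nil {
        panic(err)
    }
    var kept []string
    for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
        // drop any previous mapping for host.minikube.internal, keep everything else
        if strings.HasSuffix(line, "host.minikube.internal") {
            continue
        }
        kept = append(kept, line)
    }
    kept = append(kept, "192.168.85.1\thost.minikube.internal") // IP taken from the log above
    if err := os.WriteFile("/etc/hosts", []byte(strings.Join(kept, "\n")+"\n"), 0o644); err != nil {
        panic(err)
    }
}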
I0110 02:31:43.081864 2444124 kubeadm.go:884] updating cluster {Name:force-systemd-env-405089 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-env-405089 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I0110 02:31:43.081980 2444124 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:43.082036 2444124 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0110 02:31:43.100460 2444124 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0110 02:31:43.100486 2444124 docker.go:624] Images already preloaded, skipping extraction
I0110 02:31:43.100552 2444124 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0110 02:31:43.121230 2444124 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0110 02:31:43.121256 2444124 cache_images.go:86] Images are preloaded, skipping loading
I0110 02:31:43.121266 2444124 kubeadm.go:935] updating node { 192.168.85.2 8443 v1.35.0 docker true true} ...
I0110 02:31:43.121361 2444124 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=force-systemd-env-405089 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2
[Install]
config:
{KubernetesVersion:v1.35.0 ClusterName:force-systemd-env-405089 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0110 02:31:43.121432 2444124 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0110 02:31:43.178538 2444124 cni.go:84] Creating CNI manager for ""
I0110 02:31:43.178570 2444124 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0110 02:31:43.178597 2444124 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I0110 02:31:43.178618 2444124 kubeadm.go:197] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.85.2 APIServerPort:8443 KubernetesVersion:v1.35.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:force-systemd-env-405089 NodeName:force-systemd-env-405089 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.85.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.85.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0110 02:31:43.178739 2444124 kubeadm.go:203] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.85.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "force-systemd-env-405089"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.85.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.85.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
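The KubeletConfiguration above pins cgroupDriver: systemd, which only works if the container runtime reports the same driver; the log checked that just before rendering this config with `docker info --format {{.CgroupDriver}}` (02:31:43.121432). A small Go version of that check:

// sketch: confirm Docker reports the systemd cgroup driver the kubelet config expects.
package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    out, err := exec.Command("docker", "info", "--format", "{{.CgroupDriver}}").Output()
    if err != nil {
        panic(err)
    }
    driver := strings.TrimSpace(string(out))
    fmt.Println("docker cgroup driver:", driver)
    if driver != "systemd" {
        fmt.Println("warning: kubelet config above expects systemd")
    }
}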
I0110 02:31:43.178809 2444124 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0
I0110 02:31:43.186967 2444124 binaries.go:51] Found k8s binaries, skipping transfer
I0110 02:31:43.187037 2444124 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0110 02:31:43.196792 2444124 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (323 bytes)
I0110 02:31:43.210260 2444124 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0110 02:31:43.225215 2444124 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2224 bytes)
I0110 02:31:43.239490 2444124 ssh_runner.go:195] Run: grep 192.168.85.2 control-plane.minikube.internal$ /etc/hosts
I0110 02:31:43.243821 2444124 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.85.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0110 02:31:43.256336 2444124 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:43.411763 2444124 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0110 02:31:43.449071 2444124 certs.go:69] Setting up /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089 for IP: 192.168.85.2
I0110 02:31:43.449090 2444124 certs.go:195] generating shared ca certs ...
I0110 02:31:43.449107 2444124 certs.go:227] acquiring lock for ca certs: {Name:mk3365aee58ca444945faa08aa6e1c1a1b730f09 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:43.449242 2444124 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key
I0110 02:31:43.449285 2444124 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key
I0110 02:31:43.449293 2444124 certs.go:257] generating profile certs ...
I0110 02:31:43.449348 2444124 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/client.key
I0110 02:31:43.449359 2444124 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/client.crt with IP's: []
I0110 02:31:44.085771 2444124 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/client.crt ...
I0110 02:31:44.085806 2444124 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/client.crt: {Name:mkef9124ceed79304369528c5a27c7648b78a9cd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:44.086085 2444124 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/client.key ...
I0110 02:31:44.086119 2444124 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/client.key: {Name:mk76e85724a13af463ddacfcf286ac686d149ee6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:44.086302 2444124 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key.6f34228b
I0110 02:31:44.086324 2444124 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt.6f34228b with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.85.2]
I0110 02:31:44.498570 2444124 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt.6f34228b ...
I0110 02:31:44.498600 2444124 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt.6f34228b: {Name:mk4617469fd5fea335a0e87bd3a6539b7da9cd53 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:44.498789 2444124 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key.6f34228b ...
I0110 02:31:44.498804 2444124 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key.6f34228b: {Name:mkb480b1d60b5ebb03b826d7d02dfd7e44510312 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:44.498902 2444124 certs.go:382] copying /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt.6f34228b -> /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt
I0110 02:31:44.498990 2444124 certs.go:386] copying /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key.6f34228b -> /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key
I0110 02:31:44.499054 2444124 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.key
I0110 02:31:44.499073 2444124 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.crt with IP's: []
I0110 02:31:44.994504 2444124 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.crt ...
I0110 02:31:44.994541 2444124 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.crt: {Name:mkcf4f6fccba9f412afa8632ad4d0d2e51e05241 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:44.995667 2444124 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.key ...
I0110 02:31:44.995695 2444124 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.key: {Name:mka4f25cc07eefbd88194e70f96e9c6a66c304c8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:44.995867 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0110 02:31:44.995917 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0110 02:31:44.995937 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0110 02:31:44.995956 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0110 02:31:44.995969 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0110 02:31:44.996009 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0110 02:31:44.996029 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0110 02:31:44.996041 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0110 02:31:44.996117 2444124 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem (1338 bytes)
W0110 02:31:44.996176 2444124 certs.go:480] ignoring /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877_empty.pem, impossibly tiny 0 bytes
I0110 02:31:44.996191 2444124 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem (1675 bytes)
I0110 02:31:44.996234 2444124 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem (1082 bytes)
I0110 02:31:44.996282 2444124 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem (1123 bytes)
I0110 02:31:44.996317 2444124 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem (1679 bytes)
I0110 02:31:44.996397 2444124 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem (1708 bytes)
I0110 02:31:44.996456 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> /usr/share/ca-certificates/22228772.pem
I0110 02:31:44.996487 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:44.996506 2444124 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem -> /usr/share/ca-certificates/2222877.pem
I0110 02:31:44.997128 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0110 02:31:45.025285 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0110 02:31:45.117344 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0110 02:31:45.154410 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0110 02:31:45.182384 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0110 02:31:45.209059 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0110 02:31:45.237573 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0110 02:31:45.267287 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-env-405089/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0110 02:31:45.300586 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem --> /usr/share/ca-certificates/22228772.pem (1708 bytes)
I0110 02:31:45.325792 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0110 02:31:45.348897 2444124 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem --> /usr/share/ca-certificates/2222877.pem (1338 bytes)
I0110 02:31:45.369906 2444124 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I0110 02:31:45.388123 2444124 ssh_runner.go:195] Run: openssl version
I0110 02:31:45.396122 2444124 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/22228772.pem
I0110 02:31:45.406059 2444124 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/22228772.pem /etc/ssl/certs/22228772.pem
I0110 02:31:45.416032 2444124 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22228772.pem
I0110 02:31:45.424294 2444124 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Jan 10 02:00 /usr/share/ca-certificates/22228772.pem
I0110 02:31:45.424422 2444124 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22228772.pem
I0110 02:31:45.470580 2444124 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I0110 02:31:45.479156 2444124 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/22228772.pem /etc/ssl/certs/3ec20f2e.0
I0110 02:31:45.487771 2444124 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:45.496463 2444124 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I0110 02:31:45.504778 2444124 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:45.510041 2444124 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jan 10 01:54 /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:45.510161 2444124 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:45.562186 2444124 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I0110 02:31:45.570693 2444124 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0
I0110 02:31:45.585612 2444124 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/2222877.pem
I0110 02:31:45.595614 2444124 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/2222877.pem /etc/ssl/certs/2222877.pem
I0110 02:31:45.604471 2444124 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2222877.pem
I0110 02:31:45.609923 2444124 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Jan 10 02:00 /usr/share/ca-certificates/2222877.pem
I0110 02:31:45.609994 2444124 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2222877.pem
I0110 02:31:45.652960 2444124 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I0110 02:31:45.665449 2444124 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/2222877.pem /etc/ssl/certs/51391683.0
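The openssl/ln pairs above implement the standard OpenSSL trust-store layout: each certificate installed under /usr/share/ca-certificates gets a /etc/ssl/certs/<subject-hash>.0 symlink (b5213941.0 for minikubeCA.pem, 3ec20f2e.0 and 51391683.0 for the two .pem files). A Go sketch of deriving the hash and creating one such link, using the minikubeCA path from the log:

// sketch: compute the OpenSSL subject hash and create the <hash>.0 trust-store symlink.
package main

import (
    "fmt"
    "os"
    "os/exec"
    "strings"
)

func main() {
    certPath := "/usr/share/ca-certificates/minikubeCA.pem" // path taken from the log
    out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
    if err != nil {
        panic(err)
    }
    hash := strings.TrimSpace(string(out)) // e.g. "b5213941" for minikubeCA.pem above
    link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
    _ = os.Remove(link) // mimic `ln -f`
    if err := os.Symlink(certPath, link); err != nil {
        panic(err)
    }
}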
I0110 02:31:45.674251 2444124 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0110 02:31:45.678274 2444124 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0110 02:31:45.678327 2444124 kubeadm.go:401] StartCluster: {Name:force-systemd-env-405089 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-env-405089 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.85.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I0110 02:31:45.678449 2444124 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0110 02:31:45.696897 2444124 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0110 02:31:45.715111 2444124 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0110 02:31:45.737562 2444124 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I0110 02:31:45.737624 2444124 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0110 02:31:45.753944 2444124 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0110 02:31:45.754029 2444124 kubeadm.go:158] found existing configuration files:
I0110 02:31:45.754124 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0110 02:31:45.767074 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0110 02:31:45.767192 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0110 02:31:45.775076 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0110 02:31:45.783839 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0110 02:31:45.783955 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0110 02:31:45.791557 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0110 02:31:45.800017 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0110 02:31:45.800155 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0110 02:31:45.807549 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0110 02:31:45.815839 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0110 02:31:45.815967 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0110 02:31:45.823557 2444124 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0110 02:31:45.879151 2444124 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I0110 02:31:45.879584 2444124 kubeadm.go:319] [preflight] Running pre-flight checks
I0110 02:31:45.979502 2444124 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I0110 02:31:45.979676 2444124 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I0110 02:31:45.979749 2444124 kubeadm.go:319] OS: Linux
I0110 02:31:45.979833 2444124 kubeadm.go:319] CGROUPS_CPU: enabled
I0110 02:31:45.979918 2444124 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I0110 02:31:45.979997 2444124 kubeadm.go:319] CGROUPS_CPUSET: enabled
I0110 02:31:45.980082 2444124 kubeadm.go:319] CGROUPS_DEVICES: enabled
I0110 02:31:45.980163 2444124 kubeadm.go:319] CGROUPS_FREEZER: enabled
I0110 02:31:45.980247 2444124 kubeadm.go:319] CGROUPS_MEMORY: enabled
I0110 02:31:45.980325 2444124 kubeadm.go:319] CGROUPS_PIDS: enabled
I0110 02:31:45.980413 2444124 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I0110 02:31:45.980515 2444124 kubeadm.go:319] CGROUPS_BLKIO: enabled
I0110 02:31:46.069019 2444124 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I0110 02:31:46.069217 2444124 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0110 02:31:46.069354 2444124 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0110 02:31:46.086323 2444124 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0110 02:31:41.564217 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: overlay
I0110 02:31:41.564316 2444942 ubuntu.go:71] root file system type: overlay
I0110 02:31:41.564502 2444942 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0110 02:31:41.564636 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.591765 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:41.592086 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:41.592175 2444942 main.go:144] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \
-H fd:// --containerd=/run/containerd/containerd.sock \
-H unix:///var/run/docker.sock \
--default-ulimit=nofile=1048576:1048576 \
--tlsverify \
--tlscacert /etc/docker/ca.pem \
--tlscert /etc/docker/server.pem \
--tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0110 02:31:41.761531 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=always
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
I0110 02:31:41.761616 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:41.782449 2444942 main.go:144] libmachine: Using SSH client type: native
I0110 02:31:41.782827 2444942 main.go:144] libmachine: &{{{<nil> 0 [] [] []} docker [0x47b2e0] 0x47d7f0 <nil> [] 0s} 127.0.0.1 34986 <nil> <nil>}
I0110 02:31:41.782851 2444942 main.go:144] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0110 02:31:43.042474 2444942 main.go:144] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2025-12-12 14:49:02.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2026-01-10 02:31:41.754593192 +0000
@@ -9,23 +9,34 @@
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
Restart=always
+
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
+
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
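The command issued at 02:31:41.782851 only swaps docker.service.new into place and restarts Docker when `diff -u` shows the rendered unit differs from what is on disk. A compact Go sketch of that compare-then-replace flow (assumes root and a systemd host, like the kicbase container above):

// sketch: update docker.service only if the freshly rendered unit differs, then reload/enable/restart.
package main

import (
    "bytes"
    "os"
    "os/exec"
)

func main() {
    const unit = "/lib/systemd/system/docker.service"
    cur, _ := os.ReadFile(unit) // missing file just means "different"
    next, err := os.ReadFile(unit + ".new")
    if err != nil {
        panic(err)
    }
    if bytes.Equal(cur, next) {
        return // unit already up to date, nothing to restart
    }
    if err := os.Rename(unit+".new", unit); err != nil {
        panic(err)
    }
    // the log additionally passes -f to systemctl; omitted here for brevity
    for _, args := range [][]string{
        {"systemctl", "daemon-reload"},
        {"systemctl", "enable", "docker"},
        {"systemctl", "restart", "docker"},
    } {
        if out, err := exec.Command(args[0], args[1:]...).CombinedOutput(); err != nil {
            panic(string(out))
        }
    }
}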
I0110 02:31:43.042496 2444942 machine.go:97] duration metric: took 5.698448584s to provisionDockerMachine
I0110 02:31:43.042508 2444942 client.go:176] duration metric: took 11.271502022s to LocalClient.Create
I0110 02:31:43.042522 2444942 start.go:167] duration metric: took 11.271565709s to libmachine.API.Create "force-systemd-flag-389625"
I0110 02:31:43.042529 2444942 start.go:293] postStartSetup for "force-systemd-flag-389625" (driver="docker")
I0110 02:31:43.042539 2444942 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0110 02:31:43.042594 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0110 02:31:43.042629 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.076614 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.196482 2444942 ssh_runner.go:195] Run: cat /etc/os-release
I0110 02:31:43.201700 2444942 main.go:144] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0110 02:31:43.201726 2444942 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm)
I0110 02:31:43.201737 2444942 filesync.go:126] Scanning /home/jenkins/minikube-integration/22414-2221005/.minikube/addons for local assets ...
I0110 02:31:43.201796 2444942 filesync.go:126] Scanning /home/jenkins/minikube-integration/22414-2221005/.minikube/files for local assets ...
I0110 02:31:43.201877 2444942 filesync.go:149] local asset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> 22228772.pem in /etc/ssl/certs
I0110 02:31:43.201885 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> /etc/ssl/certs/22228772.pem
I0110 02:31:43.201986 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I0110 02:31:43.214196 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem --> /etc/ssl/certs/22228772.pem (1708 bytes)
I0110 02:31:43.241904 2444942 start.go:296] duration metric: took 199.360809ms for postStartSetup
I0110 02:31:43.242273 2444942 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-flag-389625
I0110 02:31:43.263273 2444942 profile.go:143] Saving config to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/config.json ...
I0110 02:31:43.263543 2444942 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0110 02:31:43.263584 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.283380 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.391153 2444942 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0110 02:31:43.396781 2444942 start.go:128] duration metric: took 11.628189455s to createHost
I0110 02:31:43.396804 2444942 start.go:83] releasing machines lock for "force-systemd-flag-389625", held for 11.628322055s
I0110 02:31:43.396875 2444942 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" force-systemd-flag-389625
I0110 02:31:43.415596 2444942 ssh_runner.go:195] Run: cat /version.json
I0110 02:31:43.415661 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.415925 2444942 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0110 02:31:43.415983 2444942 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" force-systemd-flag-389625
I0110 02:31:43.442514 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.477676 2444942 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:34986 SSHKeyPath:/home/jenkins/minikube-integration/22414-2221005/.minikube/machines/force-systemd-flag-389625/id_rsa Username:docker}
I0110 02:31:43.711077 2444942 ssh_runner.go:195] Run: systemctl --version
I0110 02:31:43.721326 2444942 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W0110 02:31:43.726734 2444942 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0110 02:31:43.726807 2444942 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0110 02:31:43.760612 2444942 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/10-crio-bridge.conflist.disabled] bridge cni config(s)
I0110 02:31:43.760636 2444942 start.go:496] detecting cgroup driver to use...
I0110 02:31:43.760650 2444942 start.go:500] using "systemd" cgroup driver as enforced via flags
I0110 02:31:43.760747 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0110 02:31:43.776486 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml"
I0110 02:31:43.785831 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0110 02:31:43.795047 2444942 containerd.go:147] configuring containerd to use "systemd" as cgroup driver...
I0110 02:31:43.795106 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml"
I0110 02:31:43.804716 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0110 02:31:43.814084 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0110 02:31:43.823155 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0110 02:31:43.832515 2444942 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0110 02:31:43.841283 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0110 02:31:43.850677 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0110 02:31:43.859949 2444942 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0110 02:31:43.869426 2444942 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0110 02:31:43.878026 2444942 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0110 02:31:43.886454 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:44.030564 2444942 ssh_runner.go:195] Run: sudo systemctl restart containerd
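The sed edits above flip containerd's runc handler to the systemd cgroup driver. A sketch of checking and applying the same setting by hand, mirroring the commands the log runs (the stock /etc/containerd/config.toml layout is assumed):

    # Inspect the current value of SystemdCgroup in the runc options.
    grep -n 'SystemdCgroup' /etc/containerd/config.toml
    # Set it to true and restart containerd, as the provisioner does above.
    sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml
    sudo systemctl restart containerd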
I0110 02:31:44.134281 2444942 start.go:496] detecting cgroup driver to use...
I0110 02:31:44.134314 2444942 start.go:500] using "systemd" cgroup driver as enforced via flags
I0110 02:31:44.134390 2444942 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0110 02:31:44.164357 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0110 02:31:44.178141 2444942 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0110 02:31:44.203502 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0110 02:31:44.225293 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0110 02:31:44.259875 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0110 02:31:44.298197 2444942 ssh_runner.go:195] Run: which cri-dockerd
I0110 02:31:44.302282 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0110 02:31:44.310035 2444942 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes)
I0110 02:31:44.323184 2444942 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0110 02:31:44.479958 2444942 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0110 02:31:44.628745 2444942 docker.go:578] configuring docker to use "systemd" as cgroup driver...
I0110 02:31:44.628855 2444942 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes)
I0110 02:31:44.646424 2444942 ssh_runner.go:195] Run: sudo systemctl reset-failed docker
I0110 02:31:44.659407 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:44.806969 2444942 ssh_runner.go:195] Run: sudo systemctl restart docker
I0110 02:31:45.429132 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
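The 129-byte /etc/docker/daemon.json written a few lines above is not printed in the log. A hypothetical content that would force the systemd cgroup driver (an assumption, shown only to illustrate what configuring docker to use "systemd" typically amounts to):

    # Hypothetical daemon.json; the file minikube actually wrote may differ.
    cat <<'EOF' | sudo tee /etc/docker/daemon.json >/dev/null
    {
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    sudo systemctl restart docker
    docker info --format '{{.CgroupDriver}}'   # expect: systemd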
I0110 02:31:45.449741 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0110 02:31:45.466128 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0110 02:31:45.483936 2444942 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0110 02:31:45.652722 2444942 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0110 02:31:45.851372 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:46.020791 2444942 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0110 02:31:46.040175 2444942 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service
I0110 02:31:46.054245 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:46.202922 2444942 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0110 02:31:46.282568 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0110 02:31:46.299250 2444942 start.go:553] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0110 02:31:46.299324 2444942 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0110 02:31:46.304150 2444942 start.go:574] Will wait 60s for crictl version
I0110 02:31:46.304219 2444942 ssh_runner.go:195] Run: which crictl
I0110 02:31:46.309882 2444942 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version
I0110 02:31:46.365333 2444942 start.go:590] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 29.1.3
RuntimeApiVersion: v1
I0110 02:31:46.365407 2444942 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0110 02:31:46.397294 2444942 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0110 02:31:46.430776 2444942 out.go:252] * Preparing Kubernetes v1.35.0 on Docker 29.1.3 ...
I0110 02:31:46.430856 2444942 cli_runner.go:164] Run: docker network inspect force-systemd-flag-389625 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0110 02:31:46.446745 2444942 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts
I0110 02:31:46.450899 2444942 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0110 02:31:46.460438 2444942 kubeadm.go:884] updating cluster {Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerNam
e:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSH
AuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false} ...
I0110 02:31:46.460546 2444942 preload.go:188] Checking if preload exists for k8s version v1.35.0 and runtime docker
I0110 02:31:46.460598 2444942 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0110 02:31:46.482795 2444942 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0110 02:31:46.482816 2444942 docker.go:624] Images already preloaded, skipping extraction
I0110 02:31:46.482894 2444942 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0110 02:31:46.503709 2444942 docker.go:694] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.35.0
registry.k8s.io/kube-scheduler:v1.35.0
registry.k8s.io/kube-controller-manager:v1.35.0
registry.k8s.io/kube-proxy:v1.35.0
registry.k8s.io/etcd:3.6.6-0
registry.k8s.io/coredns/coredns:v1.13.1
registry.k8s.io/pause:3.10.1
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0110 02:31:46.503732 2444942 cache_images.go:86] Images are preloaded, skipping loading
I0110 02:31:46.503741 2444942 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.35.0 docker true true} ...
I0110 02:31:46.503828 2444942 kubeadm.go:947] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.35.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=force-systemd-flag-389625 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2
[Install]
config:
{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
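The kubelet unit rendered above clears the inherited ExecStart= and substitutes node-specific flags; on the node, the merged result can be checked with standard systemd tooling (generic commands, not taken from this log):

    # Show the unit together with its drop-ins, including the ExecStart= reset.
    systemctl cat kubelet
    # Print the effective command line systemd will execute.
    systemctl show -p ExecStart kubelet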
I0110 02:31:46.503890 2444942 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0110 02:31:46.568277 2444942 cni.go:84] Creating CNI manager for ""
I0110 02:31:46.568357 2444942 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0110 02:31:46.568393 2444942 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16
I0110 02:31:46.568445 2444942 kubeadm.go:197] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.35.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:force-systemd-flag-389625 NodeName:force-systemd-flag-389625 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt S
taticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0110 02:31:46.568620 2444942 kubeadm.go:203] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.76.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "force-systemd-flag-389625"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.76.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.76.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
kubernetesVersion: v1.35.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
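Before the init step later in this log, the rendered kubeadm config can be sanity-checked offline. A sketch, assuming a kubeadm release that ships the validate subcommand, and using the same binary and config path that appear further down:

    # Static validation of the generated config.
    sudo /var/lib/minikube/binaries/v1.35.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml
    # Dry-run the init to preview what would be written without touching the node.
    sudo /var/lib/minikube/binaries/v1.35.0/kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run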
I0110 02:31:46.568728 2444942 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.35.0
I0110 02:31:46.576738 2444942 binaries.go:51] Found k8s binaries, skipping transfer
I0110 02:31:46.576804 2444942 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0110 02:31:46.584333 2444942 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (324 bytes)
I0110 02:31:46.597086 2444942 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0110 02:31:46.609903 2444942 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2225 bytes)
I0110 02:31:46.623198 2444942 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts
I0110 02:31:46.627340 2444942 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0110 02:31:46.637410 2444942 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0110 02:31:46.813351 2444942 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0110 02:31:46.853529 2444942 certs.go:69] Setting up /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625 for IP: 192.168.76.2
I0110 02:31:46.853605 2444942 certs.go:195] generating shared ca certs ...
I0110 02:31:46.853636 2444942 certs.go:227] acquiring lock for ca certs: {Name:mk3365aee58ca444945faa08aa6e1c1a1b730f09 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:46.853847 2444942 certs.go:236] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key
I0110 02:31:46.853930 2444942 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key
I0110 02:31:46.853957 2444942 certs.go:257] generating profile certs ...
I0110 02:31:46.854046 2444942 certs.go:364] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.key
I0110 02:31:46.854089 2444942 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.crt with IP's: []
I0110 02:31:46.947349 2444942 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.crt ...
I0110 02:31:46.947424 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.crt: {Name:mkc2a0e18aeb9bc161a2b7bdc69edce7c225059e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:46.947656 2444942 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.key ...
I0110 02:31:46.947692 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/client.key: {Name:mkbec37be7fe98f01eeac1efcff3341ee3c0872e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:46.947838 2444942 certs.go:364] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11
I0110 02:31:46.947881 2444942 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2]
I0110 02:31:47.211172 2444942 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11 ...
I0110 02:31:47.211243 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11: {Name:mkb26b4fa8a855d6ab75cf6ae5986179421e433d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.211463 2444942 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11 ...
I0110 02:31:47.211500 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11: {Name:mkaede7629652a36b550448eb511dc667db770a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.211648 2444942 certs.go:382] copying /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt.754ddc11 -> /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt
I0110 02:31:47.211795 2444942 certs.go:386] copying /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key.754ddc11 -> /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key
I0110 02:31:47.211904 2444942 certs.go:364] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key
I0110 02:31:47.211947 2444942 crypto.go:68] Generating cert /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt with IP's: []
I0110 02:31:47.431675 2444942 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt ...
I0110 02:31:47.431751 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt: {Name:mkf0c56bc6a962d35ef411e8b1db0da0dee06e76 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.431961 2444942 crypto.go:164] Writing key to /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key ...
I0110 02:31:47.431997 2444942 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key: {Name:mk1b1a2249d88d087b490ca8bc1af9bab6c5cd65 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0110 02:31:47.432136 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I0110 02:31:47.432180 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I0110 02:31:47.432212 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I0110 02:31:47.432258 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I0110 02:31:47.432293 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I0110 02:31:47.432322 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I0110 02:31:47.432364 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I0110 02:31:47.432398 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I0110 02:31:47.432482 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem (1338 bytes)
W0110 02:31:47.432539 2444942 certs.go:480] ignoring /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877_empty.pem, impossibly tiny 0 bytes
I0110 02:31:47.432564 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca-key.pem (1675 bytes)
I0110 02:31:47.432623 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/ca.pem (1082 bytes)
I0110 02:31:47.432673 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/cert.pem (1123 bytes)
I0110 02:31:47.432730 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/key.pem (1679 bytes)
I0110 02:31:47.432801 2444942 certs.go:484] found cert: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem (1708 bytes)
I0110 02:31:47.432861 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.432896 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem -> /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.432926 2444942 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem -> /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.433610 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0110 02:31:47.453555 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0110 02:31:47.472772 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0110 02:31:47.493487 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0110 02:31:47.513383 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1436 bytes)
I0110 02:31:47.534626 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0110 02:31:47.554446 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0110 02:31:47.574178 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/profiles/force-systemd-flag-389625/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0110 02:31:47.594420 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0110 02:31:47.614798 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/certs/2222877.pem --> /usr/share/ca-certificates/2222877.pem (1338 bytes)
I0110 02:31:47.635266 2444942 ssh_runner.go:362] scp /home/jenkins/minikube-integration/22414-2221005/.minikube/files/etc/ssl/certs/22228772.pem --> /usr/share/ca-certificates/22228772.pem (1708 bytes)
I0110 02:31:47.655406 2444942 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (722 bytes)
I0110 02:31:47.670021 2444942 ssh_runner.go:195] Run: openssl version
I0110 02:31:47.676614 2444942 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.684815 2444942 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem
I0110 02:31:47.693216 2444942 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.697583 2444942 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jan 10 01:54 /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.697646 2444942 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0110 02:31:47.771210 2444942 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/b5213941.0
I0110 02:31:47.792458 2444942 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0
I0110 02:31:47.806445 2444942 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.828400 2444942 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/2222877.pem /etc/ssl/certs/2222877.pem
I0110 02:31:47.841461 2444942 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.847202 2444942 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Jan 10 02:00 /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.847317 2444942 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/2222877.pem
I0110 02:31:47.889947 2444942 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/51391683.0
I0110 02:31:47.898442 2444942 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/2222877.pem /etc/ssl/certs/51391683.0
I0110 02:31:47.910391 2444942 ssh_runner.go:195] Run: sudo test -s /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.918871 2444942 ssh_runner.go:195] Run: sudo ln -fs /usr/share/ca-certificates/22228772.pem /etc/ssl/certs/22228772.pem
I0110 02:31:47.928363 2444942 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.932866 2444942 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Jan 10 02:00 /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.932981 2444942 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/22228772.pem
I0110 02:31:47.975611 2444942 ssh_runner.go:195] Run: sudo test -L /etc/ssl/certs/3ec20f2e.0
I0110 02:31:47.984122 2444942 ssh_runner.go:195] Run: sudo ln -fs /etc/ssl/certs/22228772.pem /etc/ssl/certs/3ec20f2e.0
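The test -L / ln -fs pairs above implement OpenSSL's hashed-name lookup: each CA in /etc/ssl/certs needs a symlink named <subject-hash>.0 pointing at the certificate. A sketch for a single certificate, using the minikube CA handled above:

    # Compute the subject hash and create the symlink OpenSSL expects.
    cert=/usr/share/ca-certificates/minikubeCA.pem
    hash=$(openssl x509 -hash -noout -in "$cert")
    sudo ln -fs /etc/ssl/certs/minikubeCA.pem "/etc/ssl/certs/${hash}.0"   # b5213941.0 in this run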
I0110 02:31:47.992727 2444942 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0110 02:31:47.997508 2444942 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0110 02:31:47.997608 2444942 kubeadm.go:401] StartCluster: {Name:force-systemd-flag-389625 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1767944074-22401@sha256:5af296c365892fa7c4c61cd02bf3cdb33e2c362939e717d7686924b3b3f07773 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.35.0 ClusterName:force-systemd-flag-389625 Namespace:default APIServerHAVIP: APIServerName:m
inikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.35.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAut
hSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s Rosetta:false}
I0110 02:31:47.997780 2444942 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0110 02:31:48.015607 2444942 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0110 02:31:48.027609 2444942 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0110 02:31:48.037195 2444942 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I0110 02:31:48.037364 2444942 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0110 02:31:48.049830 2444942 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0110 02:31:48.049901 2444942 kubeadm.go:158] found existing configuration files:
I0110 02:31:48.049986 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0110 02:31:48.059872 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0110 02:31:48.059993 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0110 02:31:48.068889 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0110 02:31:48.079048 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0110 02:31:48.079166 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0110 02:31:48.088092 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0110 02:31:48.098007 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0110 02:31:48.098121 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0110 02:31:48.107267 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0110 02:31:48.117920 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0110 02:31:48.118032 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0110 02:31:48.127917 2444942 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0110 02:31:48.180767 2444942 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I0110 02:31:48.180909 2444942 kubeadm.go:319] [preflight] Running pre-flight checks
I0110 02:31:48.290339 2444942 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I0110 02:31:48.290624 2444942 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I0110 02:31:48.290676 2444942 kubeadm.go:319] OS: Linux
I0110 02:31:48.290728 2444942 kubeadm.go:319] CGROUPS_CPU: enabled
I0110 02:31:48.290780 2444942 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I0110 02:31:48.290831 2444942 kubeadm.go:319] CGROUPS_CPUSET: enabled
I0110 02:31:48.290894 2444942 kubeadm.go:319] CGROUPS_DEVICES: enabled
I0110 02:31:48.290946 2444942 kubeadm.go:319] CGROUPS_FREEZER: enabled
I0110 02:31:48.291013 2444942 kubeadm.go:319] CGROUPS_MEMORY: enabled
I0110 02:31:48.291064 2444942 kubeadm.go:319] CGROUPS_PIDS: enabled
I0110 02:31:48.291119 2444942 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I0110 02:31:48.291170 2444942 kubeadm.go:319] CGROUPS_BLKIO: enabled
I0110 02:31:48.376921 2444942 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I0110 02:31:48.377171 2444942 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0110 02:31:48.377352 2444942 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0110 02:31:48.409493 2444942 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0110 02:31:46.092497 2444124 out.go:252] - Generating certificates and keys ...
I0110 02:31:46.092669 2444124 kubeadm.go:319] [certs] Using existing ca certificate authority
I0110 02:31:46.092770 2444124 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I0110 02:31:46.875771 2444124 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I0110 02:31:47.144364 2444124 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I0110 02:31:47.314724 2444124 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I0110 02:31:47.984584 2444124 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I0110 02:31:48.242134 2444124 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I0110 02:31:48.242499 2444124 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [force-systemd-env-405089 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
I0110 02:31:48.461465 2444124 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I0110 02:31:48.461631 2444124 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [force-systemd-env-405089 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
I0110 02:31:48.733504 2444124 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I0110 02:31:48.861496 2444124 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I0110 02:31:49.185510 2444124 kubeadm.go:319] [certs] Generating "sa" key and public key
I0110 02:31:49.185598 2444124 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0110 02:31:49.425584 2444124 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I0110 02:31:49.777471 2444124 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0110 02:31:49.961468 2444124 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0110 02:31:50.177454 2444124 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0110 02:31:50.374241 2444124 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0110 02:31:50.374970 2444124 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0110 02:31:50.381331 2444124 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0110 02:31:48.416465 2444942 out.go:252] - Generating certificates and keys ...
I0110 02:31:48.416688 2444942 kubeadm.go:319] [certs] Using existing ca certificate authority
I0110 02:31:48.416848 2444942 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I0110 02:31:48.613948 2444942 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key
I0110 02:31:49.073506 2444942 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key
I0110 02:31:49.428686 2444942 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key
I0110 02:31:49.712507 2444942 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key
I0110 02:31:49.836655 2444942 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key
I0110 02:31:49.837353 2444942 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
I0110 02:31:50.119233 2444942 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key
I0110 02:31:50.120016 2444942 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
I0110 02:31:50.479427 2444942 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key
I0110 02:31:50.633494 2444942 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key
I0110 02:31:50.705818 2444942 kubeadm.go:319] [certs] Generating "sa" key and public key
I0110 02:31:50.706064 2444942 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0110 02:31:50.768089 2444942 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I0110 02:31:50.918537 2444942 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0110 02:31:51.105411 2444942 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0110 02:31:51.794074 2444942 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0110 02:31:52.020214 2444942 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0110 02:31:52.020319 2444942 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0110 02:31:52.025960 2444942 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0110 02:31:50.384846 2444124 out.go:252] - Booting up control plane ...
I0110 02:31:50.384957 2444124 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0110 02:31:50.385056 2444124 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0110 02:31:50.385129 2444124 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0110 02:31:50.414088 2444124 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0110 02:31:50.414228 2444124 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0110 02:31:50.422787 2444124 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0110 02:31:50.423116 2444124 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0110 02:31:50.423172 2444124 kubeadm.go:319] [kubelet-start] Starting the kubelet
I0110 02:31:50.599415 2444124 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0110 02:31:50.599570 2444124 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0110 02:31:52.029579 2444942 out.go:252] - Booting up control plane ...
I0110 02:31:52.029696 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0110 02:31:52.030816 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0110 02:31:52.032102 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0110 02:31:52.049145 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0110 02:31:52.049263 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0110 02:31:52.057814 2444942 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0110 02:31:52.058122 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0110 02:31:52.058167 2444942 kubeadm.go:319] [kubelet-start] Starting the kubelet
I0110 02:31:52.196343 2444942 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0110 02:31:52.196468 2444942 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0110 02:35:50.600577 2444124 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.001263977s
I0110 02:35:50.601197 2444124 kubeadm.go:319]
I0110 02:35:50.601279 2444124 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I0110 02:35:50.601345 2444124 kubeadm.go:319] - The kubelet is not running
I0110 02:35:50.601480 2444124 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I0110 02:35:50.601496 2444124 kubeadm.go:319]
I0110 02:35:50.601596 2444124 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I0110 02:35:50.601630 2444124 kubeadm.go:319] - 'systemctl status kubelet'
I0110 02:35:50.601664 2444124 kubeadm.go:319] - 'journalctl -xeu kubelet'
I0110 02:35:50.601672 2444124 kubeadm.go:319]
I0110 02:35:50.606506 2444124 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0110 02:35:50.606929 2444124 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I0110 02:35:50.607043 2444124 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0110 02:35:50.607291 2444124 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
I0110 02:35:50.607301 2444124 kubeadm.go:319]
I0110 02:35:50.607370 2444124 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
W0110 02:35:50.607511 2444124 out.go:285] ! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [force-systemd-env-405089 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [force-systemd-env-405089 localhost] and IPs [192.168.85.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001263977s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
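For reference, a minimal manual re-run of the health check that times out above, executed from the host against the node. This is a sketch only: the profile name is a placeholder and the `minikube ssh -- <command>` pass-through form is an assumption; the curl endpoint, systemctl and journalctl commands are taken from the kubeadm output itself.
# Probe the kubelet health endpoint that kubeadm's wait-control-plane phase polls
minikube ssh -p <profile> -- curl -sS http://127.0.0.1:10248/healthz
# If the connection is refused, inspect the kubelet unit inside the node
minikube ssh -p <profile> -- sudo systemctl status kubelet
minikube ssh -p <profile> -- sudo journalctl -xeu kubelet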
I0110 02:35:50.607594 2444124 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I0110 02:35:51.030219 2444124 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0110 02:35:51.043577 2444124 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I0110 02:35:51.043642 2444124 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0110 02:35:51.051651 2444124 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0110 02:35:51.051673 2444124 kubeadm.go:158] found existing configuration files:
I0110 02:35:51.051734 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0110 02:35:51.059812 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0110 02:35:51.059882 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0110 02:35:51.068320 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0110 02:35:51.076706 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0110 02:35:51.076822 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0110 02:35:51.084858 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0110 02:35:51.093615 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0110 02:35:51.093686 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0110 02:35:51.101862 2444124 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0110 02:35:51.110328 2444124 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0110 02:35:51.110395 2444124 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0110 02:35:51.118285 2444124 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0110 02:35:51.161915 2444124 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I0110 02:35:51.161979 2444124 kubeadm.go:319] [preflight] Running pre-flight checks
I0110 02:35:51.247247 2444124 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I0110 02:35:51.247324 2444124 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I0110 02:35:51.247366 2444124 kubeadm.go:319] OS: Linux
I0110 02:35:51.247418 2444124 kubeadm.go:319] CGROUPS_CPU: enabled
I0110 02:35:51.247473 2444124 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I0110 02:35:51.247523 2444124 kubeadm.go:319] CGROUPS_CPUSET: enabled
I0110 02:35:51.247577 2444124 kubeadm.go:319] CGROUPS_DEVICES: enabled
I0110 02:35:51.247629 2444124 kubeadm.go:319] CGROUPS_FREEZER: enabled
I0110 02:35:51.247681 2444124 kubeadm.go:319] CGROUPS_MEMORY: enabled
I0110 02:35:51.247730 2444124 kubeadm.go:319] CGROUPS_PIDS: enabled
I0110 02:35:51.247783 2444124 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I0110 02:35:51.247850 2444124 kubeadm.go:319] CGROUPS_BLKIO: enabled
I0110 02:35:51.316861 2444124 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I0110 02:35:51.316975 2444124 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0110 02:35:51.317095 2444124 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0110 02:35:51.330675 2444124 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0110 02:35:51.336295 2444124 out.go:252] - Generating certificates and keys ...
I0110 02:35:51.336385 2444124 kubeadm.go:319] [certs] Using existing ca certificate authority
I0110 02:35:51.336458 2444124 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I0110 02:35:51.336535 2444124 kubeadm.go:319] [certs] Using existing apiserver-kubelet-client certificate and key on disk
I0110 02:35:51.336596 2444124 kubeadm.go:319] [certs] Using existing front-proxy-ca certificate authority
I0110 02:35:51.336666 2444124 kubeadm.go:319] [certs] Using existing front-proxy-client certificate and key on disk
I0110 02:35:51.336720 2444124 kubeadm.go:319] [certs] Using existing etcd/ca certificate authority
I0110 02:35:51.336783 2444124 kubeadm.go:319] [certs] Using existing etcd/server certificate and key on disk
I0110 02:35:51.336844 2444124 kubeadm.go:319] [certs] Using existing etcd/peer certificate and key on disk
I0110 02:35:51.336918 2444124 kubeadm.go:319] [certs] Using existing etcd/healthcheck-client certificate and key on disk
I0110 02:35:51.336991 2444124 kubeadm.go:319] [certs] Using existing apiserver-etcd-client certificate and key on disk
I0110 02:35:51.337028 2444124 kubeadm.go:319] [certs] Using the existing "sa" key
I0110 02:35:51.337115 2444124 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0110 02:35:51.445329 2444124 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I0110 02:35:51.773916 2444124 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0110 02:35:51.845501 2444124 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0110 02:35:52.201867 2444124 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0110 02:35:52.810005 2444124 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0110 02:35:52.810953 2444124 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0110 02:35:52.813391 2444124 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0110 02:35:52.196251 2444942 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.000438294s
I0110 02:35:52.196284 2444942 kubeadm.go:319]
I0110 02:35:52.196342 2444942 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I0110 02:35:52.196375 2444942 kubeadm.go:319] - The kubelet is not running
I0110 02:35:52.196480 2444942 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I0110 02:35:52.196486 2444942 kubeadm.go:319]
I0110 02:35:52.196591 2444942 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I0110 02:35:52.196622 2444942 kubeadm.go:319] - 'systemctl status kubelet'
I0110 02:35:52.196653 2444942 kubeadm.go:319] - 'journalctl -xeu kubelet'
I0110 02:35:52.196658 2444942 kubeadm.go:319]
I0110 02:35:52.202848 2444942 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0110 02:35:52.203270 2444942 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I0110 02:35:52.203377 2444942 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0110 02:35:52.203640 2444942 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
I0110 02:35:52.203646 2444942 kubeadm.go:319]
I0110 02:35:52.203714 2444942 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
W0110 02:35:52.203844 2444942 out.go:285] ! initialization failed, will try again: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [force-systemd-flag-389625 localhost] and IPs [192.168.76.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000438294s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
I0110 02:35:52.203917 2444942 ssh_runner.go:195] Run: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm reset --cri-socket /var/run/cri-dockerd.sock --force"
I0110 02:35:52.668064 2444942 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0110 02:35:52.684406 2444942 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver
I0110 02:35:52.684471 2444942 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0110 02:35:52.694960 2444942 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0110 02:35:52.695030 2444942 kubeadm.go:158] found existing configuration files:
I0110 02:35:52.695114 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0110 02:35:52.703880 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0110 02:35:52.703940 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0110 02:35:52.712165 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0110 02:35:52.721863 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0110 02:35:52.721985 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0110 02:35:52.731171 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0110 02:35:52.740287 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0110 02:35:52.740404 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0110 02:35:52.748618 2444942 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0110 02:35:52.757969 2444942 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0110 02:35:52.758029 2444942 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0110 02:35:52.766204 2444942 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0110 02:35:52.819064 2444942 kubeadm.go:319] [init] Using Kubernetes version: v1.35.0
I0110 02:35:52.819481 2444942 kubeadm.go:319] [preflight] Running pre-flight checks
I0110 02:35:52.927559 2444942 kubeadm.go:319] [preflight] The system verification failed. Printing the output from the verification:
I0110 02:35:52.927642 2444942 kubeadm.go:319] KERNEL_VERSION: 5.15.0-1084-aws
I0110 02:35:52.927679 2444942 kubeadm.go:319] OS: Linux
I0110 02:35:52.927725 2444942 kubeadm.go:319] CGROUPS_CPU: enabled
I0110 02:35:52.927773 2444942 kubeadm.go:319] CGROUPS_CPUACCT: enabled
I0110 02:35:52.927829 2444942 kubeadm.go:319] CGROUPS_CPUSET: enabled
I0110 02:35:52.927879 2444942 kubeadm.go:319] CGROUPS_DEVICES: enabled
I0110 02:35:52.927933 2444942 kubeadm.go:319] CGROUPS_FREEZER: enabled
I0110 02:35:52.927982 2444942 kubeadm.go:319] CGROUPS_MEMORY: enabled
I0110 02:35:52.928027 2444942 kubeadm.go:319] CGROUPS_PIDS: enabled
I0110 02:35:52.928076 2444942 kubeadm.go:319] CGROUPS_HUGETLB: enabled
I0110 02:35:52.928122 2444942 kubeadm.go:319] CGROUPS_BLKIO: enabled
I0110 02:35:53.012278 2444942 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster
I0110 02:35:53.012391 2444942 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0110 02:35:53.012483 2444942 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0110 02:35:53.037432 2444942 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0110 02:35:52.816841 2444124 out.go:252] - Booting up control plane ...
I0110 02:35:52.816944 2444124 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0110 02:35:52.817023 2444124 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0110 02:35:52.828369 2444124 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0110 02:35:52.849764 2444124 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0110 02:35:52.849875 2444124 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0110 02:35:52.858304 2444124 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0110 02:35:52.858625 2444124 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0110 02:35:52.858672 2444124 kubeadm.go:319] [kubelet-start] Starting the kubelet
I0110 02:35:53.019244 2444124 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0110 02:35:53.019363 2444124 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0110 02:35:53.040921 2444942 out.go:252] - Generating certificates and keys ...
I0110 02:35:53.041059 2444942 kubeadm.go:319] [certs] Using existing ca certificate authority
I0110 02:35:53.041136 2444942 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk
I0110 02:35:53.041218 2444942 kubeadm.go:319] [certs] Using existing apiserver-kubelet-client certificate and key on disk
I0110 02:35:53.041284 2444942 kubeadm.go:319] [certs] Using existing front-proxy-ca certificate authority
I0110 02:35:53.041359 2444942 kubeadm.go:319] [certs] Using existing front-proxy-client certificate and key on disk
I0110 02:35:53.041417 2444942 kubeadm.go:319] [certs] Using existing etcd/ca certificate authority
I0110 02:35:53.041484 2444942 kubeadm.go:319] [certs] Using existing etcd/server certificate and key on disk
I0110 02:35:53.041550 2444942 kubeadm.go:319] [certs] Using existing etcd/peer certificate and key on disk
I0110 02:35:53.041630 2444942 kubeadm.go:319] [certs] Using existing etcd/healthcheck-client certificate and key on disk
I0110 02:35:53.041707 2444942 kubeadm.go:319] [certs] Using existing apiserver-etcd-client certificate and key on disk
I0110 02:35:53.041749 2444942 kubeadm.go:319] [certs] Using the existing "sa" key
I0110 02:35:53.041814 2444942 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0110 02:35:53.331718 2444942 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file
I0110 02:35:53.451638 2444942 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0110 02:35:53.804134 2444942 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0110 02:35:54.036793 2444942 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0110 02:35:54.605846 2444942 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0110 02:35:54.606454 2444942 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0110 02:35:54.608995 2444942 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0110 02:35:54.612162 2444942 out.go:252] - Booting up control plane ...
I0110 02:35:54.612265 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0110 02:35:54.612343 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0110 02:35:54.612409 2444942 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0110 02:35:54.632870 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0110 02:35:54.633407 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
I0110 02:35:54.640913 2444942 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
I0110 02:35:54.641255 2444942 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0110 02:35:54.641302 2444942 kubeadm.go:319] [kubelet-start] Starting the kubelet
I0110 02:35:54.777508 2444942 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0110 02:35:54.777628 2444942 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0110 02:39:53.016701 2444124 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.000239671s
I0110 02:39:53.016728 2444124 kubeadm.go:319]
I0110 02:39:53.016782 2444124 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I0110 02:39:53.016814 2444124 kubeadm.go:319] - The kubelet is not running
I0110 02:39:53.016913 2444124 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I0110 02:39:53.016917 2444124 kubeadm.go:319]
I0110 02:39:53.017016 2444124 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I0110 02:39:53.017069 2444124 kubeadm.go:319] - 'systemctl status kubelet'
I0110 02:39:53.017100 2444124 kubeadm.go:319] - 'journalctl -xeu kubelet'
I0110 02:39:53.017110 2444124 kubeadm.go:319]
I0110 02:39:53.026674 2444124 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0110 02:39:53.027207 2444124 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I0110 02:39:53.027347 2444124 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0110 02:39:53.027605 2444124 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
I0110 02:39:53.027624 2444124 kubeadm.go:319]
I0110 02:39:53.027707 2444124 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
I0110 02:39:53.027777 2444124 kubeadm.go:403] duration metric: took 8m7.349453429s to StartCluster
I0110 02:39:53.027818 2444124 cri.go:61] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0110 02:39:53.027886 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-apiserver
I0110 02:39:53.065190 2444124 cri.go:96] found id: ""
I0110 02:39:53.065233 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.065243 2444124 logs.go:284] No container was found matching "kube-apiserver"
I0110 02:39:53.065251 2444124 cri.go:61] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0110 02:39:53.065314 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=etcd
I0110 02:39:53.090958 2444124 cri.go:96] found id: ""
I0110 02:39:53.090984 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.090993 2444124 logs.go:284] No container was found matching "etcd"
I0110 02:39:53.091000 2444124 cri.go:61] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0110 02:39:53.091077 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=coredns
I0110 02:39:53.117931 2444124 cri.go:96] found id: ""
I0110 02:39:53.117955 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.117964 2444124 logs.go:284] No container was found matching "coredns"
I0110 02:39:53.117972 2444124 cri.go:61] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0110 02:39:53.118031 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-scheduler
I0110 02:39:53.143724 2444124 cri.go:96] found id: ""
I0110 02:39:53.143749 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.143757 2444124 logs.go:284] No container was found matching "kube-scheduler"
I0110 02:39:53.143764 2444124 cri.go:61] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0110 02:39:53.143823 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-proxy
I0110 02:39:53.168452 2444124 cri.go:96] found id: ""
I0110 02:39:53.168477 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.168486 2444124 logs.go:284] No container was found matching "kube-proxy"
I0110 02:39:53.168492 2444124 cri.go:61] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0110 02:39:53.168550 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-controller-manager
I0110 02:39:53.194925 2444124 cri.go:96] found id: ""
I0110 02:39:53.194960 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.194969 2444124 logs.go:284] No container was found matching "kube-controller-manager"
I0110 02:39:53.194976 2444124 cri.go:61] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0110 02:39:53.195047 2444124 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kindnet
I0110 02:39:53.220058 2444124 cri.go:96] found id: ""
I0110 02:39:53.220083 2444124 logs.go:282] 0 containers: []
W0110 02:39:53.220100 2444124 logs.go:284] No container was found matching "kindnet"
I0110 02:39:53.220110 2444124 logs.go:123] Gathering logs for describe nodes ...
I0110 02:39:53.220122 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W0110 02:39:53.285618 2444124 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E0110 02:39:53.276636 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.277286 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.278970 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.279530 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.281145 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
E0110 02:39:53.276636 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.277286 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.278970 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.279530 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:53.281145 5574 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I0110 02:39:53.285639 2444124 logs.go:123] Gathering logs for Docker ...
I0110 02:39:53.285650 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0110 02:39:53.308836 2444124 logs.go:123] Gathering logs for container status ...
I0110 02:39:53.308869 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0110 02:39:53.341659 2444124 logs.go:123] Gathering logs for kubelet ...
I0110 02:39:53.341684 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0110 02:39:53.401462 2444124 logs.go:123] Gathering logs for dmesg ...
I0110 02:39:53.401506 2444124 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
W0110 02:39:53.419441 2444124 out.go:434] Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000239671s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:53.419490 2444124 out.go:285] *
W0110 02:39:53.419567 2444124 out.go:285] X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000239671s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:53.419587 2444124 out.go:285] *
W0110 02:39:53.419862 2444124 out.go:308] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0110 02:39:53.424999 2444124 out.go:203]
W0110 02:39:53.428767 2444124 out.go:285] X Exiting due to K8S_KUBELET_NOT_RUNNING: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.000239671s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": context deadline exceeded
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:53.428834 2444124 out.go:285] * Suggestion: Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start
W0110 02:39:53.428862 2444124 out.go:285] * Related issue: https://github.com/kubernetes/minikube/issues/4172
I0110 02:39:53.431997 2444124 out.go:203]
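The suggestion above is the actionable hint in this run: pass the kubelet cgroup driver explicitly. A minimal sketch of that retry follows, with the profile name left as a placeholder; only the --extra-config value comes from the suggestion itself, while the driver and runtime flags mirror this test's docker-on-docker setup and are not verified against this log.
# Recreate the profile and retry with the suggested kubelet cgroup driver
minikube delete -p <profile>
minikube start -p <profile> --driver=docker --container-runtime=docker \
  --extra-config=kubelet.cgroup-driver=systemd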
I0110 02:39:54.778464 2444942 kubeadm.go:319] [kubelet-check] The kubelet is not healthy after 4m0.001115013s
I0110 02:39:54.778491 2444942 kubeadm.go:319]
I0110 02:39:54.778555 2444942 kubeadm.go:319] Unfortunately, an error has occurred, likely caused by:
I0110 02:39:54.778601 2444942 kubeadm.go:319] - The kubelet is not running
I0110 02:39:54.778725 2444942 kubeadm.go:319] - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
I0110 02:39:54.778735 2444942 kubeadm.go:319]
I0110 02:39:54.778847 2444942 kubeadm.go:319] If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
I0110 02:39:54.778883 2444942 kubeadm.go:319] - 'systemctl status kubelet'
I0110 02:39:54.778919 2444942 kubeadm.go:319] - 'journalctl -xeu kubelet'
I0110 02:39:54.778927 2444942 kubeadm.go:319]
I0110 02:39:54.783246 2444942 kubeadm.go:319] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
I0110 02:39:54.783712 2444942 kubeadm.go:319] [WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
I0110 02:39:54.783842 2444942 kubeadm.go:319] [WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0110 02:39:54.784133 2444942 kubeadm.go:319] error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
I0110 02:39:54.784143 2444942 kubeadm.go:319]
I0110 02:39:54.784229 2444942 kubeadm.go:319] To see the stack trace of this error execute with --v=5 or higher
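The repeated cgroups v1 warning above only matters on cgroup v1 hosts (it also names the kubelet option FailCgroupV1 for keeping v1 support; the exact KubeletConfiguration spelling is not verified here). A quick way to confirm which hierarchy the node uses, sketched with a placeholder profile and the assumed `minikube ssh -- <command>` pass-through form:
# cgroup2fs => cgroups v2, tmpfs => cgroups v1 (the deprecated mode the warning refers to)
minikube ssh -p <profile> -- stat -fc %T /sys/fs/cgroup/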
I0110 02:39:54.784293 2444942 kubeadm.go:403] duration metric: took 8m6.786690861s to StartCluster
I0110 02:39:54.784334 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0110 02:39:54.784409 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-apiserver
I0110 02:39:54.833811 2444942 cri.go:96] found id: ""
I0110 02:39:54.833848 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.833857 2444942 logs.go:284] No container was found matching "kube-apiserver"
I0110 02:39:54.833864 2444942 cri.go:61] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0110 02:39:54.833927 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=etcd
I0110 02:39:54.874597 2444942 cri.go:96] found id: ""
I0110 02:39:54.874676 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.874698 2444942 logs.go:284] No container was found matching "etcd"
I0110 02:39:54.874717 2444942 cri.go:61] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0110 02:39:54.874799 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=coredns
I0110 02:39:54.907340 2444942 cri.go:96] found id: ""
I0110 02:39:54.907364 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.907372 2444942 logs.go:284] No container was found matching "coredns"
I0110 02:39:54.907379 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0110 02:39:54.907439 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-scheduler
I0110 02:39:54.942974 2444942 cri.go:96] found id: ""
I0110 02:39:54.943001 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.943010 2444942 logs.go:284] No container was found matching "kube-scheduler"
I0110 02:39:54.943018 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0110 02:39:54.943077 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-proxy
I0110 02:39:54.981427 2444942 cri.go:96] found id: ""
I0110 02:39:54.981449 2444942 logs.go:282] 0 containers: []
W0110 02:39:54.981458 2444942 logs.go:284] No container was found matching "kube-proxy"
I0110 02:39:54.981465 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0110 02:39:54.981531 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kube-controller-manager
I0110 02:39:55.041924 2444942 cri.go:96] found id: ""
I0110 02:39:55.041946 2444942 logs.go:282] 0 containers: []
W0110 02:39:55.041994 2444942 logs.go:284] No container was found matching "kube-controller-manager"
I0110 02:39:55.042004 2444942 cri.go:61] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0110 02:39:55.042072 2444942 ssh_runner.go:195] Run: sudo crictl --timeout=10s ps -a --quiet --name=kindnet
I0110 02:39:55.114566 2444942 cri.go:96] found id: ""
I0110 02:39:55.114587 2444942 logs.go:282] 0 containers: []
W0110 02:39:55.114596 2444942 logs.go:284] No container was found matching "kindnet"
I0110 02:39:55.114606 2444942 logs.go:123] Gathering logs for describe nodes ...
I0110 02:39:55.114634 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
W0110 02:39:55.229791 2444942 logs.go:130] failed describe nodes: command: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E0110 02:39:55.208165 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.208559 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.217197 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.218001 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.222039 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
output:
** stderr **
E0110 02:39:55.208165 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.208559 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.217197 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.218001 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:55.222039 5551 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
** /stderr **
I0110 02:39:55.229812 2444942 logs.go:123] Gathering logs for Docker ...
I0110 02:39:55.229837 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u docker -u cri-docker -n 400"
I0110 02:39:55.267290 2444942 logs.go:123] Gathering logs for container status ...
I0110 02:39:55.267338 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0110 02:39:55.359988 2444942 logs.go:123] Gathering logs for kubelet ...
I0110 02:39:55.360018 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0110 02:39:55.456371 2444942 logs.go:123] Gathering logs for dmesg ...
I0110 02:39:55.456405 2444942 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
W0110 02:39:55.476932 2444942 out.go:434] Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001115013s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:55.476973 2444942 out.go:285] *
W0110 02:39:55.477022 2444942 out.go:285] X Error starting cluster: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001115013s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:55.477184 2444942 out.go:285] *
W0110 02:39:55.477459 2444942 out.go:308] ╭─────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ * If the above advice does not help, please let us know: │
│ https://github.com/kubernetes/minikube/issues/new/choose │
│ │
│ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────╯
I0110 02:39:55.484573 2444942 out.go:203]
W0110 02:39:55.488432 2444942 out.go:285] X Exiting due to K8S_KUBELET_NOT_RUNNING: wait: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.35.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables": Process exited with status 1
stdout:
[init] Using Kubernetes version: v1.35.0
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 5.15.0-1084-aws
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_PIDS: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_BLKIO: enabled
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/var/lib/minikube/certs"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml"
[patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is not healthy after 4m0.001115013s
Unfortunately, an error has occurred, likely caused by:
- The kubelet is not running
- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
- 'systemctl status kubelet'
- 'journalctl -xeu kubelet'
stderr:
[WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1084-aws\n", err: exit status 1
[WARNING SystemVerification]: cgroups v1 support is deprecated and will be removed in a future release. Please migrate to cgroups v2. To explicitly enable cgroups v1 support for kubelet v1.35 or newer, you must set the kubelet configuration option 'FailCgroupV1' to 'false'. You must also explicitly skip this validation. For more information, see https://git.k8s.io/enhancements/keps/sig-node/5573-remove-cgroup-v1
[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error: error execution phase wait-control-plane: failed while waiting for the kubelet to start: The HTTP call equal to 'curl -sSL http://127.0.0.1:10248/healthz' returned error: Get "http://127.0.0.1:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused
To see the stack trace of this error execute with --v=5 or higher
W0110 02:39:55.488495 2444942 out.go:285] * Suggestion: Check output of 'journalctl -xeu kubelet', try passing --extra-config=kubelet.cgroup-driver=systemd to minikube start
W0110 02:39:55.488519 2444942 out.go:285] * Related issue: https://github.com/kubernetes/minikube/issues/4172
I0110 02:39:55.491695 2444942 out.go:203]
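The K8S_KUBELET_NOT_RUNNING exit repeats the two kubeadm SystemVerification warnings; the cgroups v1 warning names the kubelet option 'FailCgroupV1'. A hedged sketch of what opting back into cgroup v1 for kubelet v1.35+ might look like as a KubeletConfiguration fragment (the camelCase key failCgroupV1 is assumed from the Go field name quoted in the warning; per that warning the SystemVerification preflight check would still have to be skipped explicitly):

  apiVersion: kubelet.config.k8s.io/v1beta1
  kind: KubeletConfiguration
  # assumed YAML form of the 'FailCgroupV1' option named in the warning above
  failCgroupV1: false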
==> Docker <==
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.012742498Z" level=info msg="Restoring containers: start."
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.039653876Z" level=info msg="Deleting nftables IPv4 rules" error="exit status 1"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.059486515Z" level=info msg="Deleting nftables IPv6 rules" error="exit status 1"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.370327087Z" level=info msg="Loading containers: done."
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.394815390Z" level=warning msg="WARNING: Support for cgroup v1 is deprecated and planned to be removed by no later than May 2029 (https://github.com/moby/moby/issues/51111)"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.395046844Z" level=info msg="Docker daemon" commit=fbf3ed2 containerd-snapshotter=false storage-driver=overlay2 version=29.1.3
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.395153262Z" level=info msg="Initializing buildkit"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.416439936Z" level=info msg="Completed buildkit initialization"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.425849266Z" level=info msg="Daemon has completed initialization"
Jan 10 02:31:45 force-systemd-flag-389625 systemd[1]: Started docker.service - Docker Application Container Engine.
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.429143505Z" level=info msg="API listen on /var/run/docker.sock"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.429362668Z" level=info msg="API listen on /run/docker.sock"
Jan 10 02:31:45 force-systemd-flag-389625 dockerd[1145]: time="2026-01-10T02:31:45.429475954Z" level=info msg="API listen on [::]:2376"
Jan 10 02:31:46 force-systemd-flag-389625 systemd[1]: Starting cri-docker.service - CRI Interface for Docker Application Container Engine...
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Starting cri-dockerd dev (HEAD)"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Start docker client with request timeout 0s"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Hairpin mode is set to hairpin-veth"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Loaded network plugin cni"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Docker cri networking managed by network plugin cni"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Setting cgroupDriver systemd"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}"
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Jan 10 02:31:46 force-systemd-flag-389625 cri-dockerd[1430]: time="2026-01-10T02:31:46Z" level=info msg="Start cri-dockerd grpc backend"
Jan 10 02:31:46 force-systemd-flag-389625 systemd[1]: Started cri-docker.service - CRI Interface for Docker Application Container Engine.
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
==> describe nodes <==
command /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig" failed with error: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.35.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig": Process exited with status 1
stdout:
stderr:
E0110 02:39:57.254952 5697 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:57.255524 5697 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:57.257252 5697 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:57.257716 5697 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
E0110 02:39:57.259170 5697 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://localhost:8443/api?timeout=32s\": dial tcp [::1]:8443: connect: connection refused"
The connection to the server localhost:8443 was refused - did you specify the right host or port?
==> dmesg <==
[Jan10 01:53] kauditd_printk_skb: 8 callbacks suppressed
[Jan10 02:01] kauditd_printk_skb: 8 callbacks suppressed
==> kernel <==
02:39:57 up 10:22, 0 user, load average: 0.20, 0.85, 1.77
Linux force-systemd-flag-389625 5.15.0-1084-aws #91~20.04.1-Ubuntu SMP Fri May 2 07:00:04 UTC 2025 aarch64 GNU/Linux
PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
==> kubelet <==
Jan 10 02:39:53 force-systemd-flag-389625 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 319.
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:54 force-systemd-flag-389625 kubelet[5480]: E0110 02:39:54.286012 5480 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 320.
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:54 force-systemd-flag-389625 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:55 force-systemd-flag-389625 kubelet[5528]: E0110 02:39:55.119544 5528 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Jan 10 02:39:55 force-systemd-flag-389625 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Jan 10 02:39:55 force-systemd-flag-389625 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Jan 10 02:39:55 force-systemd-flag-389625 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 321.
Jan 10 02:39:55 force-systemd-flag-389625 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:55 force-systemd-flag-389625 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:56 force-systemd-flag-389625 kubelet[5581]: E0110 02:39:56.133774 5581 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Jan 10 02:39:56 force-systemd-flag-389625 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Jan 10 02:39:56 force-systemd-flag-389625 systemd[1]: kubelet.service: Failed with result 'exit-code'.
Jan 10 02:39:56 force-systemd-flag-389625 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 322.
Jan 10 02:39:56 force-systemd-flag-389625 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:56 force-systemd-flag-389625 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
Jan 10 02:39:57 force-systemd-flag-389625 kubelet[5628]: E0110 02:39:57.022536 5628 run.go:72] "command failed" err="failed to validate kubelet configuration, error: kubelet is configured to not run on a host using cgroup v1. cgroup v1 support is unsupported and will be removed in a future release, path: &TypeMeta{Kind:,APIVersion:,}"
Jan 10 02:39:57 force-systemd-flag-389625 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Jan 10 02:39:57 force-systemd-flag-389625 systemd[1]: kubelet.service: Failed with result 'exit-code'.
-- /stdout --
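Every kubelet restart in the excerpt above fails the same configuration validation: kubelet v1.35 refuses to run on a host that only exposes cgroup v1. A quick way to confirm which cgroup hierarchy the host (here, the machine backing the docker driver) provides; cgroup2fs indicates the unified v2 hierarchy, tmpfs indicates legacy v1:

  stat -fc %T /sys/fs/cgroup/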
helpers_test.go:263: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p force-systemd-flag-389625 -n force-systemd-flag-389625
helpers_test.go:263: (dbg) Non-zero exit: out/minikube-linux-arm64 status --format={{.APIServer}} -p force-systemd-flag-389625 -n force-systemd-flag-389625: exit status 6 (457.107211ms)
-- stdout --
Stopped
WARNING: Your kubectl is pointing to stale minikube-vm.
To fix the kubectl context, run `minikube update-context`
-- /stdout --
** stderr **
E0110 02:39:58.090441 2458176 status.go:458] kubeconfig endpoint: get endpoint: "force-systemd-flag-389625" does not appear in /home/jenkins/minikube-integration/22414-2221005/kubeconfig
** /stderr **
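The status output also warns that kubectl still points at a stale context, consistent with the stderr note that the profile is missing from the kubeconfig. A sketch of the fix minikube itself suggests (profile name from this run; only meaningful while the profile still exists, i.e. before the cleanup step below):

  minikube update-context -p force-systemd-flag-389625
  kubectl config current-context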
helpers_test.go:263: status error: exit status 6 (may be ok)
helpers_test.go:265: "force-systemd-flag-389625" apiserver is not running, skipping kubectl commands (state="Stopped")
helpers_test.go:176: Cleaning up "force-systemd-flag-389625" profile ...
helpers_test.go:179: (dbg) Run: out/minikube-linux-arm64 delete -p force-systemd-flag-389625
helpers_test.go:179: (dbg) Done: out/minikube-linux-arm64 delete -p force-systemd-flag-389625: (2.125299621s)
--- FAIL: TestForceSystemdFlag (508.91s)