=== RUN TestFunctional/serial/ComponentHealth
functional_test.go:810: (dbg) Run: kubectl --context functional-771033 get po -l tier=control-plane -n kube-system -o=json
functional_test.go:825: etcd phase: Running
functional_test.go:833: etcd is not Ready: {Phase:Running Conditions:[{Type:PodReadyToStartContainers Status:True} {Type:Initialized Status:True} {Type:Ready Status:False} {Type:ContainersReady Status:True} {Type:PodScheduled Status:True}] Message: Reason: HostIP:192.168.39.95 PodIP:192.168.39.95 StartTime:2024-08-18 18:47:13 +0000 UTC ContainerStatuses:[{Name:etcd State:{Waiting:<nil> Running:0xc002194b10 Terminated:<nil>} LastTerminationState:{Waiting:<nil> Running:<nil> Terminated:0xc0021361c0} Ready:true RestartCount:3 Image:registry.k8s.io/etcd:3.5.15-0 ImageID:docker-pullable://registry.k8s.io/etcd@sha256:a6dc63e6e8cfa0307d7851762fa6b629afb18f28d8aa3fab5a6e91b4af60026a ContainerID:docker://0565a39bb52bfe91d52e6d9f0dde7ee191fcf0d165870a9643cc1a2a6c38ff63}]}
functional_test.go:825: kube-apiserver phase: Running
functional_test.go:833: kube-apiserver is not Ready: {Phase:Running Conditions:[{Type:PodReadyToStartContainers Status:True} {Type:Initialized Status:True} {Type:Ready Status:False} {Type:ContainersReady Status:False} {Type:PodScheduled Status:True}] Message: Reason: HostIP:192.168.39.95 PodIP:192.168.39.95 StartTime:2024-08-18 18:48:27 +0000 UTC ContainerStatuses:[{Name:kube-apiserver State:{Waiting:<nil> Running:0xc002194b70 Terminated:<nil>} LastTerminationState:{Waiting:<nil> Running:<nil> Terminated:<nil>} Ready:false RestartCount:0 Image:registry.k8s.io/kube-apiserver:v1.31.0 ImageID:docker-pullable://registry.k8s.io/kube-apiserver@sha256:470179274deb9dc3a81df55cfc24823ce153147d4ebf2ed649a4f271f51eaddf ContainerID:docker://1df53b51ee39c5227c8f0fe4bf5959801f0fdaa8c3287c7c3d3f9081e6a60d98}]}
functional_test.go:825: kube-controller-manager phase: Running
functional_test.go:833: kube-controller-manager is not Ready: {Phase:Running Conditions:[{Type:PodReadyToStartContainers Status:True} {Type:Initialized Status:True} {Type:Ready Status:False} {Type:ContainersReady Status:True} {Type:PodScheduled Status:True}] Message: Reason: HostIP:192.168.39.95 PodIP:192.168.39.95 StartTime:2024-08-18 18:47:13 +0000 UTC ContainerStatuses:[{Name:kube-controller-manager State:{Waiting:<nil> Running:0xc002194bd0 Terminated:<nil>} LastTerminationState:{Waiting:<nil> Running:<nil> Terminated:0xc002136230} Ready:true RestartCount:3 Image:registry.k8s.io/kube-controller-manager:v1.31.0 ImageID:docker-pullable://registry.k8s.io/kube-controller-manager@sha256:f6f3c33dda209e8434b83dacf5244c03b59b0018d93325ff21296a142b68497d ContainerID:docker://4854ca3a4bea46ceb2056f41fe683b89cd407000aaf47da2951ae43578fa8ca8}]}
functional_test.go:825: kube-scheduler phase: Running
functional_test.go:833: kube-scheduler is not Ready: {Phase:Running Conditions:[{Type:PodReadyToStartContainers Status:True} {Type:Initialized Status:True} {Type:Ready Status:False} {Type:ContainersReady Status:True} {Type:PodScheduled Status:True}] Message: Reason: HostIP:192.168.39.95 PodIP:192.168.39.95 StartTime:2024-08-18 18:47:13 +0000 UTC ContainerStatuses:[{Name:kube-scheduler State:{Waiting:<nil> Running:0xc002194c30 Terminated:<nil>} LastTerminationState:{Waiting:<nil> Running:<nil> Terminated:0xc0021362a0} Ready:true RestartCount:3 Image:registry.k8s.io/kube-scheduler:v1.31.0 ImageID:docker-pullable://registry.k8s.io/kube-scheduler@sha256:96ddae9c9b2e79342e0551e2d2ec422c0c02629a74d928924aaa069706619808 ContainerID:docker://354afb0d0718fa574064211a716bc6faf8a82cdc3905bbec45ebc7b2154862c8}]}
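Each control-plane pod above reports Phase Running but a Ready condition of False, which is what trips the test's readiness check. One way to inspect that same Ready condition by hand, assuming the functional-771033 context used at the top of this test is still reachable, is for example:

    kubectl --context functional-771033 get po -l tier=control-plane -n kube-system -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}'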
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p functional-771033 -n functional-771033
helpers_test.go:244: <<< TestFunctional/serial/ComponentHealth FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestFunctional/serial/ComponentHealth]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p functional-771033 logs -n 25
helpers_test.go:252: TestFunctional/serial/ComponentHealth logs:
-- stdout --
==> Audit <==
|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
| unpause | nospam-290448 --log_dir | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:44 UTC | 18 Aug 24 18:44 UTC |
| | /tmp/nospam-290448 unpause | | | | | |
| unpause | nospam-290448 --log_dir | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:44 UTC | 18 Aug 24 18:44 UTC |
| | /tmp/nospam-290448 unpause | | | | | |
| unpause | nospam-290448 --log_dir | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:44 UTC | 18 Aug 24 18:44 UTC |
| | /tmp/nospam-290448 unpause | | | | | |
| stop | nospam-290448 --log_dir | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:44 UTC | 18 Aug 24 18:44 UTC |
| | /tmp/nospam-290448 stop | | | | | |
| stop | nospam-290448 --log_dir | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:44 UTC | 18 Aug 24 18:45 UTC |
| | /tmp/nospam-290448 stop | | | | | |
| stop | nospam-290448 --log_dir | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:45 UTC | 18 Aug 24 18:45 UTC |
| | /tmp/nospam-290448 stop | | | | | |
| delete | -p nospam-290448 | nospam-290448 | jenkins | v1.33.1 | 18 Aug 24 18:45 UTC | 18 Aug 24 18:45 UTC |
| start | -p functional-771033 | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:45 UTC | 18 Aug 24 18:46 UTC |
| | --memory=4000 | | | | | |
| | --apiserver-port=8441 | | | | | |
| | --wait=all --driver=kvm2 | | | | | |
| start | -p functional-771033 | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | --alsologtostderr -v=8 | | | | | |
| cache | functional-771033 cache add | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | registry.k8s.io/pause:3.1 | | | | | |
| cache | functional-771033 cache add | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | registry.k8s.io/pause:3.3 | | | | | |
| cache | functional-771033 cache add | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | registry.k8s.io/pause:latest | | | | | |
| cache | functional-771033 cache add | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | minikube-local-cache-test:functional-771033 | | | | | |
| cache | functional-771033 cache delete | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | minikube-local-cache-test:functional-771033 | | | | | |
| cache | delete | minikube | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | registry.k8s.io/pause:3.3 | | | | | |
| cache | list | minikube | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| ssh | functional-771033 ssh sudo | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | crictl images | | | | | |
| ssh | functional-771033 | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | ssh sudo docker rmi | | | | | |
| | registry.k8s.io/pause:latest | | | | | |
| ssh | functional-771033 ssh | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | |
| | sudo crictl inspecti | | | | | |
| | registry.k8s.io/pause:latest | | | | | |
| cache | functional-771033 cache reload | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| ssh | functional-771033 ssh | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | sudo crictl inspecti | | | | | |
| | registry.k8s.io/pause:latest | | | | | |
| cache | delete | minikube | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | registry.k8s.io/pause:3.1 | | | | | |
| cache | delete | minikube | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | registry.k8s.io/pause:latest | | | | | |
| kubectl | functional-771033 kubectl -- | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:46 UTC |
| | --context functional-771033 | | | | | |
| | get pods | | | | | |
| start | -p functional-771033 | functional-771033 | jenkins | v1.33.1 | 18 Aug 24 18:46 UTC | 18 Aug 24 18:48 UTC |
| | --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision | | | | | |
| | --wait=all | | | | | |
|---------|--------------------------------------------------------------------------|-------------------|---------|---------|---------------------|---------------------|
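The final row of the audit table appears to be the start whose log follows under "Last Start" and whose outcome the ComponentHealth check inspects. Reconstructed from that row (binary path as used elsewhere in this log; flags such as --driver=kvm2 from the earlier start persist in the saved profile), a manual re-run would presumably look like:

    out/minikube-linux-amd64 start -p functional-771033 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all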
==> Last Start <==
Log file created at: 2024/08/18 18:46:54
Running on machine: ubuntu-20-agent-3
Binary: Built with gc go1.22.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0818 18:46:54.632770 1158634 out.go:345] Setting OutFile to fd 1 ...
I0818 18:46:54.633019 1158634 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0818 18:46:54.633023 1158634 out.go:358] Setting ErrFile to fd 2...
I0818 18:46:54.633026 1158634 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0818 18:46:54.633182 1158634 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19423-1145725/.minikube/bin
I0818 18:46:54.633724 1158634 out.go:352] Setting JSON to false
I0818 18:46:54.634591 1158634 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-3","uptime":98916,"bootTime":1723907899,"procs":192,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1066-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0818 18:46:54.634643 1158634 start.go:139] virtualization: kvm guest
I0818 18:46:54.636617 1158634 out.go:177] * [functional-771033] minikube v1.33.1 on Ubuntu 20.04 (kvm/amd64)
I0818 18:46:54.637665 1158634 out.go:177] - MINIKUBE_LOCATION=19423
I0818 18:46:54.637713 1158634 notify.go:220] Checking for updates...
I0818 18:46:54.639803 1158634 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0818 18:46:54.640852 1158634 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19423-1145725/kubeconfig
I0818 18:46:54.641935 1158634 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19423-1145725/.minikube
I0818 18:46:54.642983 1158634 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0818 18:46:54.644069 1158634 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0818 18:46:54.645476 1158634 config.go:182] Loaded profile config "functional-771033": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0818 18:46:54.645562 1158634 driver.go:394] Setting default libvirt URI to qemu:///system
I0818 18:46:54.645961 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:46:54.646018 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:46:54.662338 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:39791
I0818 18:46:54.662755 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:46:54.663343 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:46:54.663352 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:46:54.663677 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:46:54.663843 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:54.695841 1158634 out.go:177] * Using the kvm2 driver based on existing profile
I0818 18:46:54.696931 1158634 start.go:297] selected driver: kvm2
I0818 18:46:54.696946 1158634 start.go:901] validating driver "kvm2" against &{Name:functional-771033 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/19452/minikube-v1.33.1-1723740674-19452-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:4000 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:functional-771033 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.95 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0818 18:46:54.697048 1158634 start.go:912] status for kvm2: {Installed:true Healthy:true Running:true NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0818 18:46:54.697411 1158634 install.go:52] acquiring lock: {Name:mk900956b073697a4aa6c80a27c6bb0742a99a53 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0818 18:46:54.697478 1158634 install.go:117] Validating docker-machine-driver-kvm2, PATH=/home/jenkins/minikube-integration/19423-1145725/.minikube/bin:/home/jenkins/workspace/KVM_Linux_integration/out/:/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/jenkins/go/bin:/usr/local/bin/:/usr/local/go/bin/:/home/jenkins/go/bin
I0818 18:46:54.712701 1158634 install.go:137] /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2 version is 1.33.1
I0818 18:46:54.713558 1158634 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0818 18:46:54.713632 1158634 cni.go:84] Creating CNI manager for ""
I0818 18:46:54.713644 1158634 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0818 18:46:54.713707 1158634 start.go:340] cluster config:
{Name:functional-771033 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/19452/minikube-v1.33.1-1723740674-19452-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:4000 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:functional-771033 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.95 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0818 18:46:54.713817 1158634 iso.go:125] acquiring lock: {Name:mkb8cace5317b9fbdd5a745866acff5ebdb0878a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0818 18:46:54.715438 1158634 out.go:177] * Starting "functional-771033" primary control-plane node in "functional-771033" cluster
I0818 18:46:54.716610 1158634 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
I0818 18:46:54.716640 1158634 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19423-1145725/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-amd64.tar.lz4
I0818 18:46:54.716646 1158634 cache.go:56] Caching tarball of preloaded images
I0818 18:46:54.716724 1158634 preload.go:172] Found /home/jenkins/minikube-integration/19423-1145725/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0818 18:46:54.716730 1158634 cache.go:59] Finished verifying existence of preloaded tar for v1.31.0 on docker
I0818 18:46:54.716821 1158634 profile.go:143] Saving config to /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/config.json ...
I0818 18:46:54.717022 1158634 start.go:360] acquireMachinesLock for functional-771033: {Name:mk27543e6fe57e5c3e2e26d5ee14b83b659b1354 Clock:{} Delay:500ms Timeout:13m0s Cancel:<nil>}
I0818 18:46:54.717059 1158634 start.go:364] duration metric: took 25.114µs to acquireMachinesLock for "functional-771033"
I0818 18:46:54.717070 1158634 start.go:96] Skipping create...Using existing machine configuration
I0818 18:46:54.717073 1158634 fix.go:54] fixHost starting:
I0818 18:46:54.717403 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:46:54.717432 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:46:54.732563 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:33423
I0818 18:46:54.732989 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:46:54.733575 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:46:54.733600 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:46:54.733932 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:46:54.734157 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:54.734286 1158634 main.go:141] libmachine: (functional-771033) Calling .GetState
I0818 18:46:54.735768 1158634 fix.go:112] recreateIfNeeded on functional-771033: state=Running err=<nil>
W0818 18:46:54.735798 1158634 fix.go:138] unexpected machine state, will restart: <nil>
I0818 18:46:54.737372 1158634 out.go:177] * Updating the running kvm2 "functional-771033" VM ...
I0818 18:46:54.738358 1158634 machine.go:93] provisionDockerMachine start ...
I0818 18:46:54.738369 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:54.738578 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:54.740768 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:54.741072 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:54.741082 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:54.741190 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:54.741360 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:54.741504 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:54.741611 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:54.741755 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:54.741944 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:54.741950 1158634 main.go:141] libmachine: About to run SSH command:
hostname
I0818 18:46:54.853417 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-771033
I0818 18:46:54.853442 1158634 main.go:141] libmachine: (functional-771033) Calling .GetMachineName
I0818 18:46:54.853722 1158634 buildroot.go:166] provisioning hostname "functional-771033"
I0818 18:46:54.853743 1158634 main.go:141] libmachine: (functional-771033) Calling .GetMachineName
I0818 18:46:54.853984 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:54.856694 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:54.857047 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:54.857072 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:54.857239 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:54.857447 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:54.857583 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:54.857718 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:54.857851 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:54.858089 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:54.858100 1158634 main.go:141] libmachine: About to run SSH command:
sudo hostname functional-771033 && echo "functional-771033" | sudo tee /etc/hostname
I0818 18:46:54.988390 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>: functional-771033
I0818 18:46:54.988414 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:54.991354 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:54.991738 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:54.991764 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:54.991958 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:54.992161 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:54.992343 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:54.992496 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:54.992661 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:54.992840 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:54.992851 1158634 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sfunctional-771033' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 functional-771033/g' /etc/hosts;
else
echo '127.0.1.1 functional-771033' | sudo tee -a /etc/hosts;
fi
fi
I0818 18:46:55.111092 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0818 18:46:55.111112 1158634 buildroot.go:172] set auth options {CertDir:/home/jenkins/minikube-integration/19423-1145725/.minikube CaCertPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19423-1145725/.minikube}
I0818 18:46:55.111154 1158634 buildroot.go:174] setting up certificates
I0818 18:46:55.111164 1158634 provision.go:84] configureAuth start
I0818 18:46:55.111174 1158634 main.go:141] libmachine: (functional-771033) Calling .GetMachineName
I0818 18:46:55.111460 1158634 main.go:141] libmachine: (functional-771033) Calling .GetIP
I0818 18:46:55.114176 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.114492 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.114524 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.114639 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:55.116957 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.117292 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.117313 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.117426 1158634 provision.go:143] copyHostCerts
I0818 18:46:55.117489 1158634 exec_runner.go:144] found /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.pem, removing ...
I0818 18:46:55.117506 1158634 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.pem
I0818 18:46:55.117572 1158634 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.pem (1078 bytes)
I0818 18:46:55.117675 1158634 exec_runner.go:144] found /home/jenkins/minikube-integration/19423-1145725/.minikube/cert.pem, removing ...
I0818 18:46:55.117678 1158634 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19423-1145725/.minikube/cert.pem
I0818 18:46:55.117701 1158634 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19423-1145725/.minikube/cert.pem (1123 bytes)
I0818 18:46:55.117809 1158634 exec_runner.go:144] found /home/jenkins/minikube-integration/19423-1145725/.minikube/key.pem, removing ...
I0818 18:46:55.117813 1158634 exec_runner.go:203] rm: /home/jenkins/minikube-integration/19423-1145725/.minikube/key.pem
I0818 18:46:55.117839 1158634 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19423-1145725/.minikube/key.pem (1679 bytes)
I0818 18:46:55.117902 1158634 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca-key.pem org=jenkins.functional-771033 san=[127.0.0.1 192.168.39.95 functional-771033 localhost minikube]
I0818 18:46:55.306636 1158634 provision.go:177] copyRemoteCerts
I0818 18:46:55.306691 1158634 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0818 18:46:55.306718 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:55.309787 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.310111 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.310136 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.310298 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:55.310519 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.310701 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:55.310801 1158634 sshutil.go:53] new ssh client: &{IP:192.168.39.95 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/functional-771033/id_rsa Username:docker}
I0818 18:46:55.395300 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0818 18:46:55.422858 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/machines/server.pem --> /etc/docker/server.pem (1220 bytes)
I0818 18:46:55.447353 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0818 18:46:55.471923 1158634 provision.go:87] duration metric: took 360.744563ms to configureAuth
I0818 18:46:55.471946 1158634 buildroot.go:189] setting minikube options for container-runtime
I0818 18:46:55.472139 1158634 config.go:182] Loaded profile config "functional-771033": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0818 18:46:55.472176 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:55.472491 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:55.475271 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.475629 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.475654 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.475755 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:55.475951 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.476110 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.476229 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:55.476417 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:55.476588 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:55.476593 1158634 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0818 18:46:55.591442 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>: tmpfs
I0818 18:46:55.591456 1158634 buildroot.go:70] root file system type: tmpfs
I0818 18:46:55.591582 1158634 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0818 18:46:55.591601 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:55.594651 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.594985 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.595012 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.595253 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:55.595441 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.595607 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.595717 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:55.595860 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:55.596031 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:55.596081 1158634 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0818 18:46:55.724197 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target minikube-automount.service docker.socket
Requires= minikube-automount.service docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=kvm2 --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
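As the comments in the rendered unit explain, the empty ExecStart= line clears the ExecStart inherited from the base dockerd unit so that only the command written here remains. One way to confirm that on the guest after the reload, for example, would be:

    systemctl cat docker.service
    systemctl show docker --property=ExecStart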
I0818 18:46:55.724219 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:55.726837 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.727107 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.727127 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.727277 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:55.727441 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.727646 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.727826 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:55.728001 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:55.728176 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:55.728187 1158634 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0818 18:46:55.847629 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0818 18:46:55.847665 1158634 machine.go:96] duration metric: took 1.109284588s to provisionDockerMachine
I0818 18:46:55.847682 1158634 start.go:293] postStartSetup for "functional-771033" (driver="kvm2")
I0818 18:46:55.847695 1158634 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0818 18:46:55.847719 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:55.848060 1158634 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0818 18:46:55.848100 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:55.850885 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.851295 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:55.851314 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:55.851421 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:55.851743 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:55.851891 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:55.852011 1158634 sshutil.go:53] new ssh client: &{IP:192.168.39.95 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/functional-771033/id_rsa Username:docker}
I0818 18:46:55.940830 1158634 ssh_runner.go:195] Run: cat /etc/os-release
I0818 18:46:55.945224 1158634 info.go:137] Remote host: Buildroot 2023.02.9
I0818 18:46:55.945242 1158634 filesync.go:126] Scanning /home/jenkins/minikube-integration/19423-1145725/.minikube/addons for local assets ...
I0818 18:46:55.945305 1158634 filesync.go:126] Scanning /home/jenkins/minikube-integration/19423-1145725/.minikube/files for local assets ...
I0818 18:46:55.945385 1158634 filesync.go:149] local asset: /home/jenkins/minikube-integration/19423-1145725/.minikube/files/etc/ssl/certs/11529002.pem -> 11529002.pem in /etc/ssl/certs
I0818 18:46:55.945465 1158634 filesync.go:149] local asset: /home/jenkins/minikube-integration/19423-1145725/.minikube/files/etc/test/nested/copy/1152900/hosts -> hosts in /etc/test/nested/copy/1152900
I0818 18:46:55.945503 1158634 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs /etc/test/nested/copy/1152900
I0818 18:46:55.955184 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/files/etc/ssl/certs/11529002.pem --> /etc/ssl/certs/11529002.pem (1708 bytes)
I0818 18:46:55.978846 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/files/etc/test/nested/copy/1152900/hosts --> /etc/test/nested/copy/1152900/hosts (40 bytes)
I0818 18:46:56.002774 1158634 start.go:296] duration metric: took 155.07696ms for postStartSetup
I0818 18:46:56.002812 1158634 fix.go:56] duration metric: took 1.285738596s for fixHost
I0818 18:46:56.002836 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:56.006062 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.006460 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:56.006487 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.006672 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:56.006877 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:56.007125 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:56.007227 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:56.007458 1158634 main.go:141] libmachine: Using SSH client type: native
I0818 18:46:56.007634 1158634 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 192.168.39.95 22 <nil> <nil>}
I0818 18:46:56.007640 1158634 main.go:141] libmachine: About to run SSH command:
date +%s.%N
I0818 18:46:56.126457 1158634 main.go:141] libmachine: SSH cmd err, output: <nil>: 1724006816.105509244
I0818 18:46:56.126471 1158634 fix.go:216] guest clock: 1724006816.105509244
I0818 18:46:56.126477 1158634 fix.go:229] Guest: 2024-08-18 18:46:56.105509244 +0000 UTC Remote: 2024-08-18 18:46:56.002815287 +0000 UTC m=+1.404895046 (delta=102.693957ms)
I0818 18:46:56.126502 1158634 fix.go:200] guest clock delta is within tolerance: 102.693957ms
I0818 18:46:56.126508 1158634 start.go:83] releasing machines lock for "functional-771033", held for 1.409443045s
I0818 18:46:56.126529 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:56.126839 1158634 main.go:141] libmachine: (functional-771033) Calling .GetIP
I0818 18:46:56.129589 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.129978 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:56.129998 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.130094 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:56.130720 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:56.130901 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:46:56.130970 1158634 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0818 18:46:56.131010 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:56.131080 1158634 ssh_runner.go:195] Run: cat /version.json
I0818 18:46:56.131106 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:46:56.133850 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.134101 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.134219 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:56.134238 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.134395 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:56.134501 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:46:56.134524 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:46:56.134559 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:56.134723 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:56.134723 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:46:56.134887 1158634 sshutil.go:53] new ssh client: &{IP:192.168.39.95 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/functional-771033/id_rsa Username:docker}
I0818 18:46:56.134961 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:46:56.135075 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:46:56.135211 1158634 sshutil.go:53] new ssh client: &{IP:192.168.39.95 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/functional-771033/id_rsa Username:docker}
I0818 18:46:56.232976 1158634 ssh_runner.go:195] Run: systemctl --version
I0818 18:46:56.238916 1158634 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
W0818 18:46:56.245197 1158634 cni.go:209] loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found
I0818 18:46:56.245256 1158634 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0818 18:46:56.254202 1158634 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I0818 18:46:56.254216 1158634 start.go:495] detecting cgroup driver to use...
I0818 18:46:56.254343 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0818 18:46:56.273157 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0818 18:46:56.283299 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0818 18:46:56.293260 1158634 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0818 18:46:56.293309 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0818 18:46:56.304206 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0818 18:46:56.315128 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0818 18:46:56.325639 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0818 18:46:56.337923 1158634 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0818 18:46:56.349286 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0818 18:46:56.360304 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0818 18:46:56.370962 1158634 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0818 18:46:56.381221 1158634 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0818 18:46:56.391398 1158634 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0818 18:46:56.401588 1158634 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0818 18:46:56.560635 1158634 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0818 18:46:56.586933 1158634 start.go:495] detecting cgroup driver to use...
I0818 18:46:56.587031 1158634 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0818 18:46:56.608293 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0818 18:46:56.624116 1158634 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0818 18:46:56.644433 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0818 18:46:56.660301 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0818 18:46:56.674321 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0818 18:46:56.693581 1158634 ssh_runner.go:195] Run: which cri-dockerd
I0818 18:46:56.697373 1158634 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0818 18:46:56.708244 1158634 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0818 18:46:56.725176 1158634 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0818 18:46:56.885397 1158634 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0818 18:46:57.044654 1158634 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0818 18:46:57.044781 1158634 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0818 18:46:57.063853 1158634 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0818 18:46:57.217602 1158634 ssh_runner.go:195] Run: sudo systemctl restart docker
I0818 18:47:09.820788 1158634 ssh_runner.go:235] Completed: sudo systemctl restart docker: (12.603153107s)
I0818 18:47:09.820854 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0818 18:47:09.837295 1158634 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I0818 18:47:09.864161 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0818 18:47:09.878286 1158634 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0818 18:47:10.003048 1158634 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0818 18:47:10.146053 1158634 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0818 18:47:10.274428 1158634 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0818 18:47:10.292400 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0818 18:47:10.305819 1158634 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0818 18:47:10.436130 1158634 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0818 18:47:10.540991 1158634 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0818 18:47:10.541053 1158634 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0818 18:47:10.546726 1158634 start.go:563] Will wait 60s for crictl version
I0818 18:47:10.546776 1158634 ssh_runner.go:195] Run: which crictl
I0818 18:47:10.550839 1158634 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0818 18:47:10.588418 1158634 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.1.2
RuntimeApiVersion: v1
I0818 18:47:10.588476 1158634 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0818 18:47:10.611634 1158634 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0818 18:47:10.638535 1158634 out.go:235] * Preparing Kubernetes v1.31.0 on Docker 27.1.2 ...
I0818 18:47:10.638593 1158634 main.go:141] libmachine: (functional-771033) Calling .GetIP
I0818 18:47:10.641528 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:47:10.641841 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:47:10.641860 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:47:10.642158 1158634 ssh_runner.go:195] Run: grep 192.168.39.1 host.minikube.internal$ /etc/hosts
I0818 18:47:10.647876 1158634 out.go:177] - apiserver.enable-admission-plugins=NamespaceAutoProvision
I0818 18:47:10.648927 1158634 kubeadm.go:883] updating cluster {Name:functional-771033 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/19452/minikube-v1.33.1-1723740674-19452-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:4000 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:functional-771033 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.95 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0818 18:47:10.649038 1158634 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime docker
I0818 18:47:10.649093 1158634 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0818 18:47:10.665729 1158634 docker.go:685] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-771033
registry.k8s.io/kube-apiserver:v1.31.0
registry.k8s.io/kube-scheduler:v1.31.0
registry.k8s.io/kube-controller-manager:v1.31.0
registry.k8s.io/kube-proxy:v1.31.0
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
registry.k8s.io/coredns/coredns:v1.11.1
gcr.io/k8s-minikube/storage-provisioner:v5
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/pause:latest
-- /stdout --
I0818 18:47:10.665743 1158634 docker.go:615] Images already preloaded, skipping extraction
I0818 18:47:10.665813 1158634 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0818 18:47:10.684051 1158634 docker.go:685] Got preloaded images: -- stdout --
minikube-local-cache-test:functional-771033
registry.k8s.io/kube-scheduler:v1.31.0
registry.k8s.io/kube-apiserver:v1.31.0
registry.k8s.io/kube-controller-manager:v1.31.0
registry.k8s.io/kube-proxy:v1.31.0
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
registry.k8s.io/coredns/coredns:v1.11.1
gcr.io/k8s-minikube/storage-provisioner:v5
registry.k8s.io/pause:3.3
registry.k8s.io/pause:3.1
registry.k8s.io/pause:latest
-- /stdout --
I0818 18:47:10.684077 1158634 cache_images.go:84] Images are preloaded, skipping loading
I0818 18:47:10.684097 1158634 kubeadm.go:934] updating node { 192.168.39.95 8441 v1.31.0 docker true true} ...
I0818 18:47:10.684244 1158634 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=functional-771033 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.39.95
[Install]
config:
{KubernetesVersion:v1.31.0 ClusterName:functional-771033 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0818 18:47:10.684320 1158634 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0818 18:47:10.738070 1158634 extraconfig.go:124] Overwriting default enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota with user provided enable-admission-plugins=NamespaceAutoProvision for component apiserver
I0818 18:47:10.738178 1158634 cni.go:84] Creating CNI manager for ""
I0818 18:47:10.738198 1158634 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0818 18:47:10.738209 1158634 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0818 18:47:10.738236 1158634 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.39.95 APIServerPort:8441 KubernetesVersion:v1.31.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:functional-771033 NodeName:functional-771033 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceAutoProvision] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.39.95"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.39.95 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0818 18:47:10.738411 1158634 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.39.95
bindPort: 8441
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "functional-771033"
kubeletExtraArgs:
node-ip: 192.168.39.95
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.95"]
extraArgs:
enable-admission-plugins: "NamespaceAutoProvision"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8441
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
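The block above is the multi-document kubeadm config (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration) that is later copied to /var/tmp/minikube/kubeadm.yaml.new (see the scp line below) and fed to the kubeadm init phases further down. A small illustrative Go sketch, assuming gopkg.in/yaml.v3 is available and the file is readable at that path, splits the documents and prints each apiVersion/kind:

// dumpkinds.go - sketch only, not minikube code: list the documents in the
// generated kubeadm config shown above.
package main

import (
	"fmt"
	"io"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	// Path taken from the scp line below; any local copy of the config works.
	f, err := os.Open("/var/tmp/minikube/kubeadm.yaml.new")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	dec := yaml.NewDecoder(f)
	for {
		var doc map[string]interface{}
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			fmt.Fprintln(os.Stderr, "decode:", err)
			os.Exit(1)
		}
		// Expected output: the four kinds listed in the config above.
		fmt.Printf("%v / %v\n", doc["apiVersion"], doc["kind"])
	}
}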
I0818 18:47:10.738476 1158634 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.0
I0818 18:47:10.748279 1158634 binaries.go:44] Found k8s binaries, skipping transfer
I0818 18:47:10.748338 1158634 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0818 18:47:10.757659 1158634 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0818 18:47:10.774794 1158634 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0818 18:47:10.790671 1158634 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2012 bytes)
I0818 18:47:10.806965 1158634 ssh_runner.go:195] Run: grep 192.168.39.95 control-plane.minikube.internal$ /etc/hosts
I0818 18:47:10.811624 1158634 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0818 18:47:10.953614 1158634 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0818 18:47:10.972513 1158634 certs.go:68] Setting up /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033 for IP: 192.168.39.95
I0818 18:47:10.972530 1158634 certs.go:194] generating shared ca certs ...
I0818 18:47:10.972553 1158634 certs.go:226] acquiring lock for ca certs: {Name:mk13776990cc7cce8623bb9f7048b7fd53736611 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0818 18:47:10.972757 1158634 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.key
I0818 18:47:10.972812 1158634 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/proxy-client-ca.key
I0818 18:47:10.972822 1158634 certs.go:256] generating profile certs ...
I0818 18:47:10.972945 1158634 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/client.key
I0818 18:47:10.973007 1158634 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/apiserver.key.cf650114
I0818 18:47:10.973054 1158634 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/proxy-client.key
I0818 18:47:10.973200 1158634 certs.go:484] found cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/1152900.pem (1338 bytes)
W0818 18:47:10.973284 1158634 certs.go:480] ignoring /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/1152900_empty.pem, impossibly tiny 0 bytes
I0818 18:47:10.973293 1158634 certs.go:484] found cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca-key.pem (1675 bytes)
I0818 18:47:10.973328 1158634 certs.go:484] found cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/ca.pem (1078 bytes)
I0818 18:47:10.973359 1158634 certs.go:484] found cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/cert.pem (1123 bytes)
I0818 18:47:10.973384 1158634 certs.go:484] found cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/key.pem (1679 bytes)
I0818 18:47:10.973435 1158634 certs.go:484] found cert: /home/jenkins/minikube-integration/19423-1145725/.minikube/files/etc/ssl/certs/11529002.pem (1708 bytes)
I0818 18:47:10.974350 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0818 18:47:11.039202 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0818 18:47:11.112193 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0818 18:47:11.151405 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0818 18:47:11.184940 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1424 bytes)
I0818 18:47:11.222071 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0818 18:47:11.255814 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0818 18:47:11.296026 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/profiles/functional-771033/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0818 18:47:11.327001 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/certs/1152900.pem --> /usr/share/ca-certificates/1152900.pem (1338 bytes)
I0818 18:47:11.357765 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/files/etc/ssl/certs/11529002.pem --> /usr/share/ca-certificates/11529002.pem (1708 bytes)
I0818 18:47:11.393740 1158634 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19423-1145725/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0818 18:47:11.433484 1158634 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0818 18:47:11.502541 1158634 ssh_runner.go:195] Run: openssl version
I0818 18:47:11.514444 1158634 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/1152900.pem && ln -fs /usr/share/ca-certificates/1152900.pem /etc/ssl/certs/1152900.pem"
I0818 18:47:11.543132 1158634 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/1152900.pem
I0818 18:47:11.556068 1158634 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Aug 18 18:45 /usr/share/ca-certificates/1152900.pem
I0818 18:47:11.556171 1158634 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/1152900.pem
I0818 18:47:11.575627 1158634 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/1152900.pem /etc/ssl/certs/51391683.0"
I0818 18:47:11.604967 1158634 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/11529002.pem && ln -fs /usr/share/ca-certificates/11529002.pem /etc/ssl/certs/11529002.pem"
I0818 18:47:11.631266 1158634 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/11529002.pem
I0818 18:47:11.638568 1158634 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Aug 18 18:45 /usr/share/ca-certificates/11529002.pem
I0818 18:47:11.638636 1158634 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/11529002.pem
I0818 18:47:11.656890 1158634 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/11529002.pem /etc/ssl/certs/3ec20f2e.0"
I0818 18:47:11.681063 1158634 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0818 18:47:11.701548 1158634 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0818 18:47:11.710610 1158634 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 18 18:38 /usr/share/ca-certificates/minikubeCA.pem
I0818 18:47:11.710682 1158634 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0818 18:47:11.718975 1158634 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0818 18:47:11.733642 1158634 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0818 18:47:11.740563 1158634 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I0818 18:47:11.750590 1158634 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I0818 18:47:11.765553 1158634 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I0818 18:47:11.774055 1158634 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I0818 18:47:11.781486 1158634 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I0818 18:47:11.791556 1158634 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
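Each of the openssl x509 ... -checkend 86400 runs above asks whether a control-plane certificate expires within the next 24 hours (86400 seconds). A rough Go equivalent of one of those checks, illustrative only, with the path taken from the log:

// certcheck.go - sketch of the `openssl x509 -checkend 86400` check above
// using crypto/x509 instead of the openssl CLI.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if time.Until(cert.NotAfter) < 24*time.Hour { // same window as -checkend 86400
		fmt.Println("certificate will expire within 24h")
		os.Exit(1)
	}
	fmt.Println("certificate is valid for at least another 24h")
}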
I0818 18:47:11.800107 1158634 kubeadm.go:392] StartCluster: {Name:functional-771033 KeepContext:false EmbedCerts:false MinikubeISO:https://storage.googleapis.com/minikube-builds/iso/19452/minikube-v1.33.1-1723740674-19452-amd64.iso KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723740748-19452@sha256:2211a6931895d2d502e957e9667096db10734a96767d670cb4dbffdd37397b0d Memory:4000 CPUs:2 DiskSize:20000 Driver:kvm2 HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:functional-771033 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.39.95 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0818 18:47:11.800233 1158634 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0818 18:47:11.837654 1158634 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0818 18:47:11.858225 1158634 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I0818 18:47:11.858237 1158634 kubeadm.go:593] restartPrimaryControlPlane start ...
I0818 18:47:11.858288 1158634 ssh_runner.go:195] Run: sudo test -d /data/minikube
I0818 18:47:11.871516 1158634 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I0818 18:47:11.872236 1158634 kubeconfig.go:125] found "functional-771033" server: "https://192.168.39.95:8441"
I0818 18:47:11.874090 1158634 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I0818 18:47:11.886177 1158634 kubeadm.go:640] detected kubeadm config drift (will reconfigure cluster from new /var/tmp/minikube/kubeadm.yaml):
-- stdout --
--- /var/tmp/minikube/kubeadm.yaml
+++ /var/tmp/minikube/kubeadm.yaml.new
@@ -22,7 +22,7 @@
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.39.95"]
extraArgs:
- enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
+ enable-admission-plugins: "NamespaceAutoProvision"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
-- /stdout --
I0818 18:47:11.886201 1158634 kubeadm.go:1160] stopping kube-system containers ...
I0818 18:47:11.886263 1158634 ssh_runner.go:195] Run: docker ps -a --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0818 18:47:11.952557 1158634 docker.go:483] Stopping containers: [c4de3f53a26d e1a825e0ca44 5ad077dac72a 35ee16d13b79 d4a6727a157c c5771ab1f6ce 145b77ad63ec cde473f8e984 3537ffdbdfbb dc0bf8140a35 9e1e1cee3c41 163d5c0124cc 3c04014a97de 342f9a57301e b8dd751802bb b3348056df2c dae9f1201bf6 a6bf6cd3f233 759c12c404eb a706b381bbcf 720e87f4d1c6 e5293e30dad2 522687fd9f11 c804893854e6 533898c09d04 b5f81e9ed1e2 6f9672488822 4730bd898530 864379f94945 03033ee26187 5a6d47de9a00 14a3c6896f5d bab252c4dcac c0e3f9792225]
I0818 18:47:11.952651 1158634 ssh_runner.go:195] Run: docker stop c4de3f53a26d e1a825e0ca44 5ad077dac72a 35ee16d13b79 d4a6727a157c c5771ab1f6ce 145b77ad63ec cde473f8e984 3537ffdbdfbb dc0bf8140a35 9e1e1cee3c41 163d5c0124cc 3c04014a97de 342f9a57301e b8dd751802bb b3348056df2c dae9f1201bf6 a6bf6cd3f233 759c12c404eb a706b381bbcf 720e87f4d1c6 e5293e30dad2 522687fd9f11 c804893854e6 533898c09d04 b5f81e9ed1e2 6f9672488822 4730bd898530 864379f94945 03033ee26187 5a6d47de9a00 14a3c6896f5d bab252c4dcac c0e3f9792225
I0818 18:47:12.451354 1158634 ssh_runner.go:195] Run: sudo systemctl stop kubelet
I0818 18:47:12.498570 1158634 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0818 18:47:12.508705 1158634 kubeadm.go:157] found existing configuration files:
-rw------- 1 root root 5647 Aug 18 18:45 /etc/kubernetes/admin.conf
-rw------- 1 root root 5653 Aug 18 18:46 /etc/kubernetes/controller-manager.conf
-rw------- 1 root root 2007 Aug 18 18:45 /etc/kubernetes/kubelet.conf
-rw------- 1 root root 5601 Aug 18 18:46 /etc/kubernetes/scheduler.conf
I0818 18:47:12.508763 1158634 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/admin.conf
I0818 18:47:12.517439 1158634 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/kubelet.conf
I0818 18:47:12.526044 1158634 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf
I0818 18:47:12.535422 1158634 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/controller-manager.conf: Process exited with status 1
stdout:
stderr:
I0818 18:47:12.535463 1158634 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0818 18:47:12.544396 1158634 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf
I0818 18:47:12.552728 1158634 kubeadm.go:163] "https://control-plane.minikube.internal:8441" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8441 /etc/kubernetes/scheduler.conf: Process exited with status 1
stdout:
stderr:
I0818 18:47:12.552769 1158634 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0818 18:47:12.561428 1158634 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0818 18:47:12.570462 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init phase certs all --config /var/tmp/minikube/kubeadm.yaml"
I0818 18:47:12.619724 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init phase kubeconfig all --config /var/tmp/minikube/kubeadm.yaml"
I0818 18:47:13.497984 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init phase kubelet-start --config /var/tmp/minikube/kubeadm.yaml"
I0818 18:47:13.703218 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init phase control-plane all --config /var/tmp/minikube/kubeadm.yaml"
I0818 18:47:13.796445 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init phase etcd local --config /var/tmp/minikube/kubeadm.yaml"
I0818 18:47:13.903564 1158634 api_server.go:52] waiting for apiserver process to appear ...
I0818 18:47:13.903650 1158634 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0818 18:47:14.404736 1158634 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0818 18:47:14.904473 1158634 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0818 18:47:14.924509 1158634 api_server.go:72] duration metric: took 1.020961281s to wait for apiserver process to appear ...
I0818 18:47:14.924527 1158634 api_server.go:88] waiting for apiserver healthz status ...
I0818 18:47:14.924552 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:14.925020 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:15.425638 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:18.104673 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W0818 18:47:18.104698 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I0818 18:47:18.104712 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:18.142465 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W0818 18:47:18.142489 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I0818 18:47:18.424879 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:18.431392 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0818 18:47:18.431412 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0818 18:47:18.924942 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:18.931936 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0818 18:47:18.931970 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0818 18:47:19.425592 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:19.437153 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 200:
ok
I0818 18:47:19.462483 1158634 api_server.go:141] control plane version: v1.31.0
I0818 18:47:19.462508 1158634 api_server.go:131] duration metric: took 4.537975265s to wait for apiserver health ...
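The healthz polling above shows the usual restart sequence: connection refused right after the apiserver container starts, 403 for the anonymous probe, 500 while the rbac/bootstrap-roles and scheduling/bootstrap-system-priority-classes post-start hooks finish, then 200. A stripped-down Go sketch of such a poller (not minikube's actual api_server.go logic; the endpoint is taken from the log, and certificate verification is skipped because this anonymous probe only cares about the status code):

// healthzpoll.go - illustrative poller for the behaviour visible in the log.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	const url = "https://192.168.39.95:8441/healthz" // endpoint from the log
	client := &http.Client{
		Timeout:   2 * time.Second,
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	deadline := time.Now().Add(2 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err != nil {
			fmt.Println("not reachable yet:", err) // e.g. connection refused right after restart
		} else {
			fmt.Println("healthz returned", resp.StatusCode) // 403 and 500 precede 200 in the log
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("gave up waiting for a 200 from /healthz")
}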
I0818 18:47:19.462524 1158634 cni.go:84] Creating CNI manager for ""
I0818 18:47:19.462536 1158634 cni.go:158] "kvm2" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0818 18:47:19.463979 1158634 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0818 18:47:19.465625 1158634 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0818 18:47:19.484837 1158634 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0818 18:47:19.527700 1158634 system_pods.go:43] waiting for kube-system pods to appear ...
I0818 18:47:19.539937 1158634 system_pods.go:59] 7 kube-system pods found
I0818 18:47:19.539960 1158634 system_pods.go:61] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running / Ready:ContainersNotReady (containers with unready status: [coredns]) / ContainersReady:ContainersNotReady (containers with unready status: [coredns])
I0818 18:47:19.539969 1158634 system_pods.go:61] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd])
I0818 18:47:19.539976 1158634 system_pods.go:61] "kube-apiserver-functional-771033" [50144038-6f50-40ec-91d8-5c6157da045a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0818 18:47:19.539981 1158634 system_pods.go:61] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager])
I0818 18:47:19.539986 1158634 system_pods.go:61] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running / Ready:ContainersNotReady (containers with unready status: [kube-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-proxy])
I0818 18:47:19.539995 1158634 system_pods.go:61] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler])
I0818 18:47:19.539999 1158634 system_pods.go:61] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:47:19.540007 1158634 system_pods.go:74] duration metric: took 12.291854ms to wait for pod list to return data ...
I0818 18:47:19.540013 1158634 node_conditions.go:102] verifying NodePressure condition ...
I0818 18:47:19.544841 1158634 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I0818 18:47:19.544862 1158634 node_conditions.go:123] node cpu capacity is 2
I0818 18:47:19.544876 1158634 node_conditions.go:105] duration metric: took 4.858343ms to run NodePressure ...
I0818 18:47:19.544898 1158634 ssh_runner.go:195] Run: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init phase addon all --config /var/tmp/minikube/kubeadm.yaml"
I0818 18:47:19.877110 1158634 kubeadm.go:724] waiting for restarted kubelet to initialise ...
I0818 18:47:19.881450 1158634 kubeadm.go:739] kubelet initialised
I0818 18:47:19.881459 1158634 kubeadm.go:740] duration metric: took 4.334239ms waiting for restarted kubelet to initialise ...
I0818 18:47:19.881466 1158634 pod_ready.go:36] extra waiting up to 4m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0818 18:47:19.885868 1158634 pod_ready.go:79] waiting up to 4m0s for pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace to be "Ready" ...
I0818 18:47:21.897624 1158634 pod_ready.go:103] pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace has status "Ready":"False"
I0818 18:47:22.393122 1158634 pod_ready.go:93] pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:22.393135 1158634 pod_ready.go:82] duration metric: took 2.507253626s for pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace to be "Ready" ...
I0818 18:47:22.393144 1158634 pod_ready.go:79] waiting up to 4m0s for pod "etcd-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:24.399711 1158634 pod_ready.go:103] pod "etcd-functional-771033" in "kube-system" namespace has status "Ready":"False"
I0818 18:47:26.899570 1158634 pod_ready.go:103] pod "etcd-functional-771033" in "kube-system" namespace has status "Ready":"False"
I0818 18:47:28.901947 1158634 pod_ready.go:93] pod "etcd-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:28.901963 1158634 pod_ready.go:82] duration metric: took 6.50881179s for pod "etcd-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.901973 1158634 pod_ready.go:79] waiting up to 4m0s for pod "kube-apiserver-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.908902 1158634 pod_ready.go:93] pod "kube-apiserver-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:28.908913 1158634 pod_ready.go:82] duration metric: took 6.933053ms for pod "kube-apiserver-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.908923 1158634 pod_ready.go:79] waiting up to 4m0s for pod "kube-controller-manager-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.913147 1158634 pod_ready.go:93] pod "kube-controller-manager-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:28.913156 1158634 pod_ready.go:82] duration metric: took 4.226132ms for pod "kube-controller-manager-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.913165 1158634 pod_ready.go:79] waiting up to 4m0s for pod "kube-proxy-f6krv" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.917153 1158634 pod_ready.go:93] pod "kube-proxy-f6krv" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:28.917162 1158634 pod_ready.go:82] duration metric: took 3.990886ms for pod "kube-proxy-f6krv" in "kube-system" namespace to be "Ready" ...
I0818 18:47:28.917171 1158634 pod_ready.go:79] waiting up to 4m0s for pod "kube-scheduler-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:30.924201 1158634 pod_ready.go:103] pod "kube-scheduler-functional-771033" in "kube-system" namespace has status "Ready":"False"
I0818 18:47:31.923801 1158634 pod_ready.go:93] pod "kube-scheduler-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:31.923814 1158634 pod_ready.go:82] duration metric: took 3.006637067s for pod "kube-scheduler-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:31.923822 1158634 pod_ready.go:39] duration metric: took 12.042348471s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
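The pod_ready waits above poll the Ready condition of each control-plane pod in kube-system. A client-go sketch of the same kind of check, assuming KUBECONFIG points at this cluster and using the etcd pod name from the log (illustrative only; this is not minikube's pod_ready.go):

// podready.go - sketch: report the Ready condition of one kube-system pod.
package main

import (
	"context"
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes KUBECONFIG is set; the log writes a kubeconfig for this cluster on the VM.
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	pod, err := cs.CoreV1().Pods("kube-system").Get(context.TODO(), "etcd-functional-771033", metav1.GetOptions{})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady {
			fmt.Printf("pod %s Ready=%s\n", pod.Name, c.Status) // "True" once the pod is Ready
		}
	}
}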
I0818 18:47:31.923839 1158634 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0818 18:47:31.936031 1158634 ops.go:34] apiserver oom_adj: -16
I0818 18:47:31.936045 1158634 kubeadm.go:597] duration metric: took 20.077802345s to restartPrimaryControlPlane
I0818 18:47:31.936052 1158634 kubeadm.go:394] duration metric: took 20.135957474s to StartCluster
I0818 18:47:31.936071 1158634 settings.go:142] acquiring lock: {Name:mk4f0ebfd92664d0e6b948f3537153d6e758f3b9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0818 18:47:31.936144 1158634 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19423-1145725/kubeconfig
I0818 18:47:31.936920 1158634 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19423-1145725/kubeconfig: {Name:mk782a24d297ff5aa7e33558024aff5df1a9ed4b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0818 18:47:31.937222 1158634 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.39.95 Port:8441 KubernetesVersion:v1.31.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I0818 18:47:31.937262 1158634 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I0818 18:47:31.937324 1158634 addons.go:69] Setting storage-provisioner=true in profile "functional-771033"
I0818 18:47:31.937381 1158634 addons.go:234] Setting addon storage-provisioner=true in "functional-771033"
W0818 18:47:31.937386 1158634 addons.go:243] addon storage-provisioner should already be in state true
I0818 18:47:31.937376 1158634 addons.go:69] Setting default-storageclass=true in profile "functional-771033"
I0818 18:47:31.937413 1158634 host.go:66] Checking if "functional-771033" exists ...
I0818 18:47:31.937426 1158634 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "functional-771033"
I0818 18:47:31.937443 1158634 config.go:182] Loaded profile config "functional-771033": Driver=kvm2, ContainerRuntime=docker, KubernetesVersion=v1.31.0
I0818 18:47:31.937726 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:47:31.937768 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:47:31.937815 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:47:31.937852 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:47:31.938901 1158634 out.go:177] * Verifying Kubernetes components...
I0818 18:47:31.940517 1158634 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0818 18:47:31.953501 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:43771
I0818 18:47:31.954090 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:47:31.954674 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:47:31.954690 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:47:31.955028 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:47:31.955206 1158634 main.go:141] libmachine: (functional-771033) Calling .GetState
I0818 18:47:31.957681 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:43031
I0818 18:47:31.958049 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:47:31.958055 1158634 addons.go:234] Setting addon default-storageclass=true in "functional-771033"
W0818 18:47:31.958067 1158634 addons.go:243] addon default-storageclass should already be in state true
I0818 18:47:31.958095 1158634 host.go:66] Checking if "functional-771033" exists ...
I0818 18:47:31.958520 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:47:31.958553 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:47:31.958597 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:47:31.958614 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:47:31.958984 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:47:31.959655 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:47:31.959695 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:47:31.973880 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:34067
I0818 18:47:31.974206 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:39991
I0818 18:47:31.974439 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:47:31.974637 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:47:31.974904 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:47:31.974920 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:47:31.975086 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:47:31.975106 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:47:31.975186 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:47:31.975449 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:47:31.975653 1158634 main.go:141] libmachine: (functional-771033) Calling .GetState
I0818 18:47:31.975769 1158634 main.go:141] libmachine: Found binary path at /home/jenkins/workspace/KVM_Linux_integration/out/docker-machine-driver-kvm2
I0818 18:47:31.975807 1158634 main.go:141] libmachine: Launching plugin server for driver kvm2
I0818 18:47:31.977426 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:47:31.979235 1158634 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0818 18:47:31.980577 1158634 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0818 18:47:31.980586 1158634 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0818 18:47:31.980599 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:47:31.983819 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:47:31.984327 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:47:31.984352 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:47:31.984480 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:47:31.984660 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:47:31.984781 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:47:31.984915 1158634 sshutil.go:53] new ssh client: &{IP:192.168.39.95 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/functional-771033/id_rsa Username:docker}
I0818 18:47:31.991731 1158634 main.go:141] libmachine: Plugin server listening at address 127.0.0.1:39947
I0818 18:47:31.992291 1158634 main.go:141] libmachine: () Calling .GetVersion
I0818 18:47:31.992754 1158634 main.go:141] libmachine: Using API Version 1
I0818 18:47:31.992763 1158634 main.go:141] libmachine: () Calling .SetConfigRaw
I0818 18:47:31.993075 1158634 main.go:141] libmachine: () Calling .GetMachineName
I0818 18:47:31.993294 1158634 main.go:141] libmachine: (functional-771033) Calling .GetState
I0818 18:47:31.994772 1158634 main.go:141] libmachine: (functional-771033) Calling .DriverName
I0818 18:47:31.994999 1158634 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0818 18:47:31.995009 1158634 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0818 18:47:31.995027 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHHostname
I0818 18:47:31.997976 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:47:31.998407 1158634 main.go:141] libmachine: (functional-771033) DBG | found host DHCP lease matching {name: "", mac: "52:54:00:39:c6:04", ip: ""} in network mk-functional-771033: {Iface:virbr1 ExpiryTime:2024-08-18 19:45:17 +0000 UTC Type:0 Mac:52:54:00:39:c6:04 Iaid: IPaddr:192.168.39.95 Prefix:24 Hostname:functional-771033 Clientid:01:52:54:00:39:c6:04}
I0818 18:47:31.998441 1158634 main.go:141] libmachine: (functional-771033) DBG | domain functional-771033 has defined IP address 192.168.39.95 and MAC address 52:54:00:39:c6:04 in network mk-functional-771033
I0818 18:47:31.998592 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHPort
I0818 18:47:31.998756 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHKeyPath
I0818 18:47:31.998900 1158634 main.go:141] libmachine: (functional-771033) Calling .GetSSHUsername
I0818 18:47:31.999089 1158634 sshutil.go:53] new ssh client: &{IP:192.168.39.95 Port:22 SSHKeyPath:/home/jenkins/minikube-integration/19423-1145725/.minikube/machines/functional-771033/id_rsa Username:docker}
I0818 18:47:32.139907 1158634 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0818 18:47:32.154798 1158634 node_ready.go:35] waiting up to 6m0s for node "functional-771033" to be "Ready" ...
I0818 18:47:32.159114 1158634 node_ready.go:49] node "functional-771033" has status "Ready":"True"
I0818 18:47:32.159128 1158634 node_ready.go:38] duration metric: took 4.301629ms for node "functional-771033" to be "Ready" ...
I0818 18:47:32.159139 1158634 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0818 18:47:32.165516 1158634 pod_ready.go:79] waiting up to 6m0s for pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace to be "Ready" ...
I0818 18:47:32.171715 1158634 pod_ready.go:93] pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:32.171724 1158634 pod_ready.go:82] duration metric: took 6.197121ms for pod "coredns-6f6b679f8f-jr2fb" in "kube-system" namespace to be "Ready" ...
I0818 18:47:32.171732 1158634 pod_ready.go:79] waiting up to 6m0s for pod "etcd-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:32.226394 1158634 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0818 18:47:32.240011 1158634 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0818 18:47:32.498125 1158634 pod_ready.go:93] pod "etcd-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:32.498147 1158634 pod_ready.go:82] duration metric: took 326.408239ms for pod "etcd-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:32.498159 1158634 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:32.828155 1158634 main.go:141] libmachine: Making call to close driver server
I0818 18:47:32.828160 1158634 main.go:141] libmachine: Making call to close driver server
I0818 18:47:32.828173 1158634 main.go:141] libmachine: (functional-771033) Calling .Close
I0818 18:47:32.828175 1158634 main.go:141] libmachine: (functional-771033) Calling .Close
I0818 18:47:32.828502 1158634 main.go:141] libmachine: Successfully made call to close driver server
I0818 18:47:32.828514 1158634 main.go:141] libmachine: Making call to close connection to plugin binary
I0818 18:47:32.828522 1158634 main.go:141] libmachine: Making call to close driver server
I0818 18:47:32.828528 1158634 main.go:141] libmachine: (functional-771033) Calling .Close
I0818 18:47:32.828663 1158634 main.go:141] libmachine: (functional-771033) DBG | Closing plugin on server side
I0818 18:47:32.828678 1158634 main.go:141] libmachine: Successfully made call to close driver server
I0818 18:47:32.828684 1158634 main.go:141] libmachine: Making call to close connection to plugin binary
I0818 18:47:32.828692 1158634 main.go:141] libmachine: Making call to close driver server
I0818 18:47:32.828699 1158634 main.go:141] libmachine: (functional-771033) Calling .Close
I0818 18:47:32.828787 1158634 main.go:141] libmachine: Successfully made call to close driver server
I0818 18:47:32.828793 1158634 main.go:141] libmachine: Making call to close connection to plugin binary
I0818 18:47:32.829001 1158634 main.go:141] libmachine: (functional-771033) DBG | Closing plugin on server side
I0818 18:47:32.829022 1158634 main.go:141] libmachine: Successfully made call to close driver server
I0818 18:47:32.829026 1158634 main.go:141] libmachine: Making call to close connection to plugin binary
I0818 18:47:32.835455 1158634 main.go:141] libmachine: Making call to close driver server
I0818 18:47:32.835463 1158634 main.go:141] libmachine: (functional-771033) Calling .Close
I0818 18:47:32.835687 1158634 main.go:141] libmachine: Successfully made call to close driver server
I0818 18:47:32.835696 1158634 main.go:141] libmachine: Making call to close connection to plugin binary
I0818 18:47:32.837773 1158634 out.go:177] * Enabled addons: storage-provisioner, default-storageclass
I0818 18:47:32.839091 1158634 addons.go:510] duration metric: took 901.832388ms for enable addons: enabled=[storage-provisioner default-storageclass]
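The two addon applies above can be reproduced by hand inside the guest. The sketch below is illustrative only: it shells out to the same cluster-local kubectl binary and kubeconfig paths that appear in this log, and it assumes it is run on the minikube VM where those paths exist.

// applyaddons_sketch.go: illustrative only -- re-runs the two addon applies
// seen in this log with the cluster-local kubectl binary and kubeconfig.
// Assumes it runs inside the minikube guest, where these paths exist.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	manifests := []string{
		"/etc/kubernetes/addons/storage-provisioner.yaml",
		"/etc/kubernetes/addons/storageclass.yaml",
	}
	for _, m := range manifests {
		// sudo accepts VAR=value assignments before the command, as in the log lines above.
		cmd := exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig",
			"/var/lib/minikube/binaries/v1.31.0/kubectl", "apply", "-f", m)
		out, err := cmd.CombinedOutput()
		fmt.Printf("%s: %s (err=%v)\n", m, out, err)
	}
}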
I0818 18:47:32.898008 1158634 pod_ready.go:93] pod "kube-apiserver-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:32.898024 1158634 pod_ready.go:82] duration metric: took 399.857357ms for pod "kube-apiserver-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:32.898036 1158634 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:33.298097 1158634 pod_ready.go:93] pod "kube-controller-manager-functional-771033" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:33.298110 1158634 pod_ready.go:82] duration metric: took 400.06857ms for pod "kube-controller-manager-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:33.298119 1158634 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-f6krv" in "kube-system" namespace to be "Ready" ...
I0818 18:47:33.698288 1158634 pod_ready.go:93] pod "kube-proxy-f6krv" in "kube-system" namespace has status "Ready":"True"
I0818 18:47:33.698301 1158634 pod_ready.go:82] duration metric: took 400.177062ms for pod "kube-proxy-f6krv" in "kube-system" namespace to be "Ready" ...
I0818 18:47:33.698310 1158634 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-functional-771033" in "kube-system" namespace to be "Ready" ...
I0818 18:47:33.895948 1158634 pod_ready.go:98] error getting pod "kube-scheduler-functional-771033" in "kube-system" namespace (skipping!): Get "https://192.168.39.95:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-771033": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:33.895974 1158634 pod_ready.go:82] duration metric: took 197.656257ms for pod "kube-scheduler-functional-771033" in "kube-system" namespace to be "Ready" ...
E0818 18:47:33.895989 1158634 pod_ready.go:67] WaitExtra: waitPodCondition: error getting pod "kube-scheduler-functional-771033" in "kube-system" namespace (skipping!): Get "https://192.168.39.95:8441/api/v1/namespaces/kube-system/pods/kube-scheduler-functional-771033": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:33.896025 1158634 pod_ready.go:39] duration metric: took 1.736874148s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
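The pod Ready waits above (pod_ready.go) amount to polling each pod's PodReady condition until it reports True or the 6m0s budget runs out. The client-go sketch below is illustrative only, not minikube's implementation; the kubeconfig location is an assumption, and the pod name is taken from this run purely as an example.

// podready_sketch.go: illustrative only -- polls one pod's Ready condition,
// roughly mirroring the pod_ready.go waits recorded in this log.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig for this cluster at the default location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	deadline := time.Now().Add(6 * time.Minute)
	for time.Now().Before(deadline) {
		pod, err := cs.CoreV1().Pods("kube-system").Get(context.TODO(), "etcd-functional-771033", metav1.GetOptions{})
		if err == nil {
			for _, c := range pod.Status.Conditions {
				if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
					fmt.Println("pod is Ready")
					return
				}
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for Ready")
}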
I0818 18:47:33.896052 1158634 api_server.go:52] waiting for apiserver process to appear ...
I0818 18:47:33.896118 1158634 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0818 18:47:33.928906 1158634 api_server.go:72] duration metric: took 1.991655041s to wait for apiserver process to appear ...
I0818 18:47:33.928924 1158634 api_server.go:88] waiting for apiserver healthz status ...
I0818 18:47:33.928943 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:33.929527 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:34.429124 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:34.429812 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:34.929088 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:34.929773 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:35.429315 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:35.429886 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:35.929465 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:35.930158 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:36.429759 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:36.430449 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:36.929077 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:36.929815 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:37.429013 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:37.429766 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:37.929364 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:37.930026 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:38.429629 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:38.430312 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:38.929939 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:38.930644 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:39.429275 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:39.429944 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:39.930021 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:39.930702 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:40.429251 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:40.429837 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:40.929429 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:40.930150 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:41.429816 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:41.430536 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:41.929415 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:41.930134 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:42.429257 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:42.429919 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:42.929516 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:42.930179 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:43.429852 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:43.430470 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:43.929096 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:43.929798 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:44.429322 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:44.429974 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:44.929058 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:44.929809 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:45.429322 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:45.429914 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:45.929315 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:45.929933 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:46.429356 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:46.429950 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:46.929328 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:46.929990 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:47.429665 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:47.430256 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:47.929919 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:47.930620 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:48.429157 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:48.429764 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:48.929301 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:48.929944 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:49.429530 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:49.430153 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:49.930004 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:49.930640 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:50.429195 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:50.429922 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:50.929517 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:50.930172 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:51.429842 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:51.430506 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:51.929271 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:51.929918 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:52.429313 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:52.430004 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:52.929695 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:52.930328 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:53.429987 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:53.430606 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:53.929188 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:53.929953 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:54.429528 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:54.430256 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:54.929263 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:54.929956 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:55.429491 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:55.430192 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:55.929811 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:55.930524 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:56.429125 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:56.429891 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:56.929445 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:56.930161 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:57.429803 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:57.430427 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:57.929030 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:57.929778 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:58.429392 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:58.430157 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:58.929751 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:58.930464 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:59.429042 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:59.429804 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:47:59.929735 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:47:59.930400 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:00.430052 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:00.430757 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:00.929355 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:00.929971 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:01.429320 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:01.430139 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:01.929993 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:01.930704 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:02.429269 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:02.430008 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:02.929684 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:02.930409 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:03.430037 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:03.430714 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:03.929298 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:03.930064 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:04.429681 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:04.430287 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:04.929513 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:04.930142 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:05.429874 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:05.430573 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:05.929140 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:05.929854 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:06.429342 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:06.429984 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:06.929529 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:06.930217 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:07.429942 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:07.430546 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:07.929102 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:07.929792 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:08.429396 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:08.430124 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:08.929788 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:08.930461 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:09.429066 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:09.429795 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:09.929600 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:09.930277 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:10.429956 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:10.430615 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:10.929156 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:10.929767 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:11.429491 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:11.430124 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:11.929881 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:11.930424 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:12.428993 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:12.429679 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:12.929803 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:12.930419 1158634 api_server.go:269] stopped: https://192.168.39.95:8441/healthz: Get "https://192.168.39.95:8441/healthz": dial tcp 192.168.39.95:8441: connect: connection refused
I0818 18:48:13.429991 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:15.156957 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W0818 18:48:15.156985 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I0818 18:48:15.157002 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:15.179828 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
W0818 18:48:15.179852 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 403:
{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/healthz\"","reason":"Forbidden","details":{},"code":403}
I0818 18:48:15.429182 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:15.433603 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0818 18:48:15.433621 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0818 18:48:15.929198 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:15.933946 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
W0818 18:48:15.933968 1158634 api_server.go:103] status: https://192.168.39.95:8441/healthz returned error 500:
[+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/start-apiserver-admission-initializer ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/priority-and-fairness-config-consumer ok
[+]poststarthook/priority-and-fairness-filter ok
[+]poststarthook/storage-object-count-tracker-hook ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/crd-informer-synced ok
[+]poststarthook/start-system-namespaces-controller ok
[+]poststarthook/start-cluster-authentication-info-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-controller ok
[+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
[+]poststarthook/start-legacy-token-tracking-controller ok
[+]poststarthook/start-service-ip-repair-controllers ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[+]poststarthook/priority-and-fairness-config-producer ok
[+]poststarthook/bootstrap-controller ok
[+]poststarthook/aggregator-reload-proxy-client-cert ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-status-local-available-controller ok
[+]poststarthook/apiservice-status-remote-available-controller ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-discovery-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/apiservice-openapiv3-controller ok
healthz check failed
I0818 18:48:16.429575 1158634 api_server.go:253] Checking apiserver healthz at https://192.168.39.95:8441/healthz ...
I0818 18:48:16.435519 1158634 api_server.go:279] https://192.168.39.95:8441/healthz returned 200:
ok
I0818 18:48:16.444034 1158634 api_server.go:141] control plane version: v1.31.0
I0818 18:48:16.444051 1158634 api_server.go:131] duration metric: took 42.515122562s to wait for apiserver health ...
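The healthz wait above is a plain HTTPS poll of the apiserver: retry while the connection is refused, keep retrying on 403/500, and stop once the endpoint returns 200 "ok". A minimal stdlib sketch of that loop follows; it is illustrative only, uses the endpoint from this log as an assumption, and disables certificate verification because the probe is anonymous. Appending ?verbose to /healthz returns the per-check breakdown shown in the 500 responses above even when the overall status is healthy.

// healthz_sketch.go: illustrative only -- polls the apiserver /healthz endpoint
// until it returns 200 OK, similar in spirit to the api_server.go wait above.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Assumption: same endpoint as in this log; adjust for your cluster.
	const url = "https://192.168.39.95:8441/healthz"
	client := &http.Client{
		Timeout: 2 * time.Second,
		// The probe is anonymous, so skip server cert verification in this sketch.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	deadline := time.Now().Add(4 * time.Minute)
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			body, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Printf("healthz returned %d: %s\n", resp.StatusCode, body)
			if resp.StatusCode == http.StatusOK {
				return // "ok" -- control plane is healthy
			}
		} else {
			fmt.Println("healthz not reachable yet:", err)
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for healthz")
}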
I0818 18:48:16.444060 1158634 system_pods.go:43] waiting for kube-system pods to appear ...
I0818 18:48:16.450928 1158634 system_pods.go:59] 7 kube-system pods found
I0818 18:48:16.450944 1158634 system_pods.go:61] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:16.450947 1158634 system_pods.go:61] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:16.450950 1158634 system_pods.go:61] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:16.450953 1158634 system_pods.go:61] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:16.450956 1158634 system_pods.go:61] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:16.450958 1158634 system_pods.go:61] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:16.450964 1158634 system_pods.go:61] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:16.450970 1158634 system_pods.go:74] duration metric: took 6.906098ms to wait for pod list to return data ...
I0818 18:48:16.450978 1158634 default_sa.go:34] waiting for default service account to be created ...
I0818 18:48:16.454081 1158634 default_sa.go:45] found service account: "default"
I0818 18:48:16.454093 1158634 default_sa.go:55] duration metric: took 3.110353ms for default service account to be created ...
I0818 18:48:16.454100 1158634 system_pods.go:116] waiting for k8s-apps to be running ...
I0818 18:48:16.459600 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:16.459613 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:16.459619 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:16.459622 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:16.459625 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:16.459628 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:16.459630 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:16.459635 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:16.459651 1158634 retry.go:31] will retry after 211.129846ms: missing components: kube-apiserver
I0818 18:48:16.676438 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:16.676454 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:16.676458 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:16.676461 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:16.676465 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:16.676467 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:16.676470 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:16.676475 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:16.676490 1158634 retry.go:31] will retry after 311.734201ms: missing components: kube-apiserver
I0818 18:48:16.993969 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:16.993986 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:16.993990 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:16.993993 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:16.993997 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:16.993999 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:16.994002 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:16.994007 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:16.994021 1158634 retry.go:31] will retry after 317.73006ms: missing components: kube-apiserver
I0818 18:48:17.317902 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:17.317919 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:17.317923 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:17.317926 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:17.317929 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:17.317931 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:17.317934 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:17.317939 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:17.317956 1158634 retry.go:31] will retry after 594.967704ms: missing components: kube-apiserver
I0818 18:48:17.918853 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:17.918870 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:17.918874 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:17.918878 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:17.918881 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:17.918883 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:17.918886 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:17.918892 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:17.918905 1158634 retry.go:31] will retry after 682.693224ms: missing components: kube-apiserver
I0818 18:48:18.606687 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:18.606704 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:18.606708 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:18.606711 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:18.606714 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:18.606716 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:18.606719 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:18.606724 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:18.606738 1158634 retry.go:31] will retry after 627.683554ms: missing components: kube-apiserver
I0818 18:48:19.239862 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:19.239879 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:19.239883 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:19.239885 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:19.239888 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:19.239891 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:19.239893 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:19.239898 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:19.239911 1158634 retry.go:31] will retry after 736.394257ms: missing components: kube-apiserver
I0818 18:48:19.983335 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:19.983355 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:19.983359 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:19.983362 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:19.983366 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:19.983368 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:19.983370 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:19.983378 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:19.983391 1158634 retry.go:31] will retry after 1.006828126s: missing components: kube-apiserver
I0818 18:48:20.995239 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:20.995255 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:20.995260 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:20.995264 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:20.995267 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:20.995269 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:20.995271 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:20.995277 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:20.995291 1158634 retry.go:31] will retry after 1.577247013s: missing components: kube-apiserver
I0818 18:48:22.578942 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:22.578958 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:22.578962 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:22.578965 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:22.578969 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:22.578971 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:22.578974 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:22.578978 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:22.578993 1158634 retry.go:31] will retry after 2.241204737s: missing components: kube-apiserver
I0818 18:48:24.825327 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:24.825343 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:24.825347 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:24.825350 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:24.825353 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:24.825356 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:24.825358 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:24.825363 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:24.825379 1158634 retry.go:31] will retry after 1.953357523s: missing components: kube-apiserver
I0818 18:48:26.786001 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:26.786018 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:26.786022 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:26.786025 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Pending
I0818 18:48:26.786028 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:26.786031 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:26.786033 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:26.786038 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:26.786052 1158634 retry.go:31] will retry after 2.541621745s: missing components: kube-apiserver
I0818 18:48:29.335628 1158634 system_pods.go:86] 7 kube-system pods found
I0818 18:48:29.335645 1158634 system_pods.go:89] "coredns-6f6b679f8f-jr2fb" [590318eb-621f-4f74-b5be-0b6268a28d4d] Running
I0818 18:48:29.335649 1158634 system_pods.go:89] "etcd-functional-771033" [198cabf4-335e-4a96-b8ff-296969689489] Running
I0818 18:48:29.335655 1158634 system_pods.go:89] "kube-apiserver-functional-771033" [4186abeb-76d6-4bf5-beef-2994f20dcef1] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver])
I0818 18:48:29.335659 1158634 system_pods.go:89] "kube-controller-manager-functional-771033" [18d5e8f2-ab21-4d53-a1d6-2259043375d6] Running
I0818 18:48:29.335663 1158634 system_pods.go:89] "kube-proxy-f6krv" [6d61848c-ac48-4004-bbfd-99325c6c6b5e] Running
I0818 18:48:29.335665 1158634 system_pods.go:89] "kube-scheduler-functional-771033" [17e856bc-5dd1-4979-8d62-5ea894a05851] Running
I0818 18:48:29.335670 1158634 system_pods.go:89] "storage-provisioner" [01c74b7d-d168-47d2-8415-af0dcd45453e] Running / Ready:ContainersNotReady (containers with unready status: [storage-provisioner]) / ContainersReady:ContainersNotReady (containers with unready status: [storage-provisioner])
I0818 18:48:29.335677 1158634 system_pods.go:126] duration metric: took 12.881572523s to wait for k8s-apps to be running ...
I0818 18:48:29.335684 1158634 system_svc.go:44] waiting for kubelet service to be running ....
I0818 18:48:29.335735 1158634 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0818 18:48:29.351705 1158634 system_svc.go:56] duration metric: took 16.009623ms WaitForService to wait for kubelet
I0818 18:48:29.351728 1158634 kubeadm.go:582] duration metric: took 57.41448323s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0818 18:48:29.351753 1158634 node_conditions.go:102] verifying NodePressure condition ...
I0818 18:48:29.354857 1158634 node_conditions.go:122] node storage ephemeral capacity is 17734596Ki
I0818 18:48:29.354869 1158634 node_conditions.go:123] node cpu capacity is 2
I0818 18:48:29.354879 1158634 node_conditions.go:105] duration metric: took 3.122283ms to run NodePressure ...
I0818 18:48:29.354890 1158634 start.go:241] waiting for startup goroutines ...
I0818 18:48:29.354896 1158634 start.go:246] waiting for cluster config update ...
I0818 18:48:29.354906 1158634 start.go:255] writing updated cluster config ...
I0818 18:48:29.355209 1158634 ssh_runner.go:195] Run: rm -f paused
I0818 18:48:29.406644 1158634 start.go:600] kubectl: 1.31.0, cluster: 1.31.0 (minor skew: 0)
I0818 18:48:29.409136 1158634 out.go:177] * Done! kubectl is now configured to use "functional-771033" cluster and "default" namespace by default
==> Docker <==
Aug 18 18:48:03 functional-771033 dockerd[5825]: time="2024-08-18T18:48:03.848795158Z" level=info msg="ignoring event" container=bffd4da2399ae4c89e95ebd31f42111939d5c8690a99d5eb5dfaa88f60d43963 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Aug 18 18:48:03 functional-771033 dockerd[5832]: time="2024-08-18T18:48:03.851754763Z" level=info msg="shim disconnected" id=bffd4da2399ae4c89e95ebd31f42111939d5c8690a99d5eb5dfaa88f60d43963 namespace=moby
Aug 18 18:48:03 functional-771033 dockerd[5832]: time="2024-08-18T18:48:03.852015195Z" level=warning msg="cleaning up after shim disconnected" id=bffd4da2399ae4c89e95ebd31f42111939d5c8690a99d5eb5dfaa88f60d43963 namespace=moby
Aug 18 18:48:03 functional-771033 dockerd[5832]: time="2024-08-18T18:48:03.852240409Z" level=info msg="cleaning up dead shim" namespace=moby
Aug 18 18:48:03 functional-771033 dockerd[5832]: time="2024-08-18T18:48:03.933604742Z" level=info msg="shim disconnected" id=c2c599815a6cb42a94b4f95053ec338304d970684fee6b6dc9e135d8dc496318 namespace=moby
Aug 18 18:48:03 functional-771033 dockerd[5825]: time="2024-08-18T18:48:03.933831366Z" level=info msg="ignoring event" container=c2c599815a6cb42a94b4f95053ec338304d970684fee6b6dc9e135d8dc496318 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Aug 18 18:48:03 functional-771033 dockerd[5832]: time="2024-08-18T18:48:03.934720943Z" level=warning msg="cleaning up after shim disconnected" id=c2c599815a6cb42a94b4f95053ec338304d970684fee6b6dc9e135d8dc496318 namespace=moby
Aug 18 18:48:03 functional-771033 dockerd[5832]: time="2024-08-18T18:48:03.934807313Z" level=info msg="cleaning up dead shim" namespace=moby
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.894373987Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.894788213Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.894901688Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.895018386Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.968799841Z" level=info msg="shim disconnected" id=448705e7b9ad526d3670cbbd5fa7edb60a61c1a7e47c95e8090cc8e7590bc712 namespace=moby
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.968876562Z" level=warning msg="cleaning up after shim disconnected" id=448705e7b9ad526d3670cbbd5fa7edb60a61c1a7e47c95e8090cc8e7590bc712 namespace=moby
Aug 18 18:48:05 functional-771033 dockerd[5832]: time="2024-08-18T18:48:05.968886922Z" level=info msg="cleaning up dead shim" namespace=moby
Aug 18 18:48:05 functional-771033 dockerd[5825]: time="2024-08-18T18:48:05.969651353Z" level=info msg="ignoring event" container=448705e7b9ad526d3670cbbd5fa7edb60a61c1a7e47c95e8090cc8e7590bc712 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Aug 18 18:48:12 functional-771033 dockerd[5832]: time="2024-08-18T18:48:12.916194905Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Aug 18 18:48:12 functional-771033 dockerd[5832]: time="2024-08-18T18:48:12.918841978Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Aug 18 18:48:12 functional-771033 dockerd[5832]: time="2024-08-18T18:48:12.918980547Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Aug 18 18:48:12 functional-771033 dockerd[5832]: time="2024-08-18T18:48:12.919215840Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Aug 18 18:48:12 functional-771033 cri-dockerd[6114]: time="2024-08-18T18:48:12Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1f80ee99e4a9011eccb3bbd8dc1ca93a4528c0c012f67644b0ac6de420624440/resolv.conf as [nameserver 192.168.122.1]"
Aug 18 18:48:13 functional-771033 dockerd[5832]: time="2024-08-18T18:48:13.077184005Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Aug 18 18:48:13 functional-771033 dockerd[5832]: time="2024-08-18T18:48:13.077332893Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Aug 18 18:48:13 functional-771033 dockerd[5832]: time="2024-08-18T18:48:13.077373493Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Aug 18 18:48:13 functional-771033 dockerd[5832]: time="2024-08-18T18:48:13.077494219Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.pause\"..." runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
==> container status <==
CONTAINER       IMAGE           CREATED              STATE     NAME                      ATTEMPT   POD ID          POD
1df53b51ee39c   604f5db92eaa8   17 seconds ago       Running   kube-apiserver            0         1f80ee99e4a90   kube-apiserver-functional-771033
448705e7b9ad5   6e38f40d628db   25 seconds ago       Exited    storage-provisioner       5         4ffbabb8af4ee   storage-provisioner
a276c2d0de3ca   cbb01a7bd410d   About a minute ago   Running   coredns                   2         95b27eb8893a1   coredns-6f6b679f8f-jr2fb
5162d714630ec   ad83b2ca7b09e   About a minute ago   Running   kube-proxy                3         7fd05b94cc3a5   kube-proxy-f6krv
0565a39bb52bf   2e96e5913fc06   About a minute ago   Running   etcd                      3         995ba9fbea38b   etcd-functional-771033
4854ca3a4bea4   045733566833c   About a minute ago   Running   kube-controller-manager   3         ed1547c0983af   kube-controller-manager-functional-771033
354afb0d0718f   1766f54c897f0   About a minute ago   Running   kube-scheduler            3         8dbc777d2d2aa   kube-scheduler-functional-771033
592a9b17d86a8   2e96e5913fc06   About a minute ago   Created   etcd                      2         d4a6727a157c5   etcd-functional-771033
8e0a450f68a4c   ad83b2ca7b09e   About a minute ago   Created   kube-proxy                2         cde473f8e984a   kube-proxy-f6krv
c4de3f53a26dc   045733566833c   About a minute ago   Created   kube-controller-manager   2         c5771ab1f6cee   kube-controller-manager-functional-771033
e1a825e0ca445   1766f54c897f0   About a minute ago   Exited    kube-scheduler            2         145b77ad63eca   kube-scheduler-functional-771033
3537ffdbdfbb8   cbb01a7bd410d   About a minute ago   Exited    coredns                   1         163d5c0124cc7   coredns-6f6b679f8f-jr2fb
==> coredns [3537ffdbdfbb] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 6c8bd46af3d98e03c4ae8e438c65dd0c69a5f817565481bcf1725dd66ff794963b7938c81e3a23d4c2ad9e52f818076e819219c79e8007dd90564767ed68ba4c
CoreDNS-1.11.1
linux/amd64, go1.20.7, ae2bbc2
[INFO] 127.0.0.1:53610 - 9083 "HINFO IN 7019311184401319666.9013614242261889566. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.020742812s
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [a276c2d0de3c] <==
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: connection refused
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: connection refused
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: connection refused
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: connection refused
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: connection refused
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.27.4/tools/cache/reflector.go:231: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: connect: connection refused
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
.:53
[INFO] plugin/reload: Running configuration SHA512 = 6c8bd46af3d98e03c4ae8e438c65dd0c69a5f817565481bcf1725dd66ff794963b7938c81e3a23d4c2ad9e52f818076e819219c79e8007dd90564767ed68ba4c
CoreDNS-1.11.1
linux/amd64, go1.20.7, ae2bbc2
[INFO] 127.0.0.1:35392 - 15210 "HINFO IN 9149333849223737842.3715552012028273754. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.029200035s
==> describe nodes <==
Name: functional-771033
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=functional-771033
kubernetes.io/os=linux
minikube.k8s.io/commit=3607dd695a2685a662a9ebe804e6840665786af5
minikube.k8s.io/name=functional-771033
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_08_18T18_45_52_0700
minikube.k8s.io/version=v1.33.1
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sun, 18 Aug 2024 18:45:49 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: functional-771033
AcquireTime: <unset>
RenewTime: Sun, 18 Aug 2024 18:48:29 +0000
Conditions:
Type             Status    LastHeartbeatTime                 LastTransitionTime                Reason              Message
----             ------    -----------------                 ------------------                ------              -------
MemoryPressure   Unknown   Sun, 18 Aug 2024 18:47:18 +0000   Sun, 18 Aug 2024 18:48:16 +0000   NodeStatusUnknown   Kubelet stopped posting node status.
DiskPressure     Unknown   Sun, 18 Aug 2024 18:47:18 +0000   Sun, 18 Aug 2024 18:48:16 +0000   NodeStatusUnknown   Kubelet stopped posting node status.
PIDPressure      Unknown   Sun, 18 Aug 2024 18:47:18 +0000   Sun, 18 Aug 2024 18:48:16 +0000   NodeStatusUnknown   Kubelet stopped posting node status.
Ready            Unknown   Sun, 18 Aug 2024 18:47:18 +0000   Sun, 18 Aug 2024 18:48:16 +0000   NodeStatusUnknown   Kubelet stopped posting node status.
Addresses:
InternalIP: 192.168.39.95
Hostname: functional-771033
Capacity:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3912780Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 17734596Ki
hugepages-2Mi: 0
memory: 3912780Ki
pods: 110
System Info:
Machine ID: c1d1b69e9dcc4cf9ae080243f90e5167
System UUID: c1d1b69e-9dcc-4cf9-ae08-0243f90e5167
Boot ID: 092eefd9-433d-419e-969a-6e2fd93db016
Kernel Version: 5.10.207
OS Image: Buildroot 2023.02.9
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.1.2
Kubelet Version: v1.31.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (7 in total)
Namespace     Name                                         CPU Requests   CPU Limits   Memory Requests   Memory Limits   Age
---------     ----                                         ------------   ----------   ---------------   -------------   ---
kube-system   coredns-6f6b679f8f-jr2fb                     100m (5%)      0 (0%)       70Mi (1%)         170Mi (4%)      2m33s
kube-system   etcd-functional-771033                       100m (5%)      0 (0%)       100Mi (2%)        0 (0%)          2m39s
kube-system   kube-apiserver-functional-771033             250m (12%)     0 (0%)       0 (0%)            0 (0%)          15s
kube-system   kube-controller-manager-functional-771033    200m (10%)     0 (0%)       0 (0%)            0 (0%)          2m38s
kube-system   kube-proxy-f6krv                             0 (0%)         0 (0%)       0 (0%)            0 (0%)          2m33s
kube-system   kube-scheduler-functional-771033             100m (5%)      0 (0%)       0 (0%)            0 (0%)          2m38s
kube-system   storage-provisioner                          0 (0%)         0 (0%)       0 (0%)            0 (0%)          2m31s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource            Requests     Limits
--------            --------     ------
cpu                 750m (37%)   0 (0%)
memory              170Mi (4%)   170Mi (4%)
ephemeral-storage   0 (0%)       0 (0%)
hugepages-2Mi       0 (0%)       0 (0%)
Events:
Type     Reason                     Age                      From              Message
----     ------                     ----                     ----              -------
Normal   Starting                   70s                      kube-proxy
Normal   Starting                   117s                     kube-proxy
Normal   Starting                   2m31s                    kube-proxy
Normal   NodeHasSufficientMemory    2m44s (x8 over 2m44s)    kubelet           Node functional-771033 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      2m44s (x8 over 2m44s)    kubelet           Node functional-771033 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       2m44s (x7 over 2m44s)    kubelet           Node functional-771033 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced    2m44s                    kubelet           Updated Node Allocatable limit across pods
Normal   NodeAllocatableEnforced    2m39s                    kubelet           Updated Node Allocatable limit across pods
Normal   Starting                   2m39s                    kubelet           Starting kubelet.
Normal   NodeHasSufficientPID       2m38s                    kubelet           Node functional-771033 status is now: NodeHasSufficientPID
Normal   NodeHasSufficientMemory    2m38s                    kubelet           Node functional-771033 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      2m38s                    kubelet           Node functional-771033 status is now: NodeHasNoDiskPressure
Normal   NodeReady                  2m36s                    kubelet           Node functional-771033 status is now: NodeReady
Normal   RegisteredNode             2m34s                    node-controller   Node functional-771033 event: Registered Node functional-771033 in Controller
Normal   Starting                   2m3s                     kubelet           Starting kubelet.
Normal   NodeHasSufficientMemory    2m3s (x8 over 2m3s)      kubelet           Node functional-771033 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure      2m3s (x8 over 2m3s)      kubelet           Node functional-771033 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       2m3s (x7 over 2m3s)      kubelet           Node functional-771033 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced    2m3s                     kubelet           Updated Node Allocatable limit across pods
Normal   RegisteredNode             116s                     node-controller   Node functional-771033 event: Registered Node functional-771033 in Controller
Normal   NodeHasSufficientMemory    77s (x8 over 77s)        kubelet           Node functional-771033 status is now: NodeHasSufficientMemory
Normal   Starting                   77s                      kubelet           Starting kubelet.
Normal   NodeHasNoDiskPressure      77s (x8 over 77s)        kubelet           Node functional-771033 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID       77s (x7 over 77s)        kubelet           Node functional-771033 status is now: NodeHasSufficientPID
Normal   NodeAllocatableEnforced    77s                      kubelet           Updated Node Allocatable limit across pods
Normal   RegisteredNode             69s                      node-controller   Node functional-771033 event: Registered Node functional-771033 in Controller
Normal   NodeNotReady               14s                      node-controller   Node functional-771033 status is now: NodeNotReady
==> dmesg <==
[ +0.139694] systemd-fstab-generator[3710]: Ignoring "noauto" option for root device
[ +0.160021] systemd-fstab-generator[3725]: Ignoring "noauto" option for root device
[ +0.520033] systemd-fstab-generator[3899]: Ignoring "noauto" option for root device
[ +1.808467] systemd-fstab-generator[4020]: Ignoring "noauto" option for root device
[ +0.067780] kauditd_printk_skb: 137 callbacks suppressed
[ +5.008156] kauditd_printk_skb: 74 callbacks suppressed
[ +7.496805] kauditd_printk_skb: 11 callbacks suppressed
[ +5.647699] systemd-fstab-generator[4925]: Ignoring "noauto" option for root device
[ +11.035497] systemd-fstab-generator[5348]: Ignoring "noauto" option for root device
[ +0.091843] kauditd_printk_skb: 12 callbacks suppressed
[ +0.228503] systemd-fstab-generator[5383]: Ignoring "noauto" option for root device
[ +0.156320] systemd-fstab-generator[5395]: Ignoring "noauto" option for root device
[ +0.171386] systemd-fstab-generator[5409]: Ignoring "noauto" option for root device
[Aug18 18:47] kauditd_printk_skb: 91 callbacks suppressed
[ +7.519969] systemd-fstab-generator[6062]: Ignoring "noauto" option for root device
[ +0.135117] systemd-fstab-generator[6074]: Ignoring "noauto" option for root device
[ +0.128926] systemd-fstab-generator[6086]: Ignoring "noauto" option for root device
[ +0.157908] systemd-fstab-generator[6101]: Ignoring "noauto" option for root device
[ +0.510684] systemd-fstab-generator[6278]: Ignoring "noauto" option for root device
[ +2.746239] systemd-fstab-generator[6983]: Ignoring "noauto" option for root device
[ +1.015203] kauditd_printk_skb: 205 callbacks suppressed
[ +5.141677] kauditd_printk_skb: 42 callbacks suppressed
[ +12.255339] systemd-fstab-generator[8158]: Ignoring "noauto" option for root device
[ +0.107207] kauditd_printk_skb: 19 callbacks suppressed
[ +5.831869] kauditd_printk_skb: 14 callbacks suppressed
==> etcd [0565a39bb52b] <==
{"level":"info","ts":"2024-08-18T18:47:15.463115Z","caller":"embed/etcd.go:599","msg":"serving peer traffic","address":"192.168.39.95:2380"}
{"level":"info","ts":"2024-08-18T18:47:15.469622Z","caller":"embed/etcd.go:279","msg":"now serving peer/client/metrics","local-member-id":"a71e7bac075997","initial-advertise-peer-urls":["https://192.168.39.95:2380"],"listen-peer-urls":["https://192.168.39.95:2380"],"advertise-client-urls":["https://192.168.39.95:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.39.95:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2024-08-18T18:47:15.469639Z","caller":"embed/etcd.go:870","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2024-08-18T18:47:15.470448Z","caller":"fileutil/purge.go:50","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
{"level":"info","ts":"2024-08-18T18:47:15.475119Z","caller":"fileutil/purge.go:50","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
{"level":"info","ts":"2024-08-18T18:47:15.475361Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"986e33f48d4d13ba","local-member-id":"a71e7bac075997","added-peer-id":"a71e7bac075997","added-peer-peer-urls":["https://192.168.39.95:2380"]}
{"level":"info","ts":"2024-08-18T18:47:15.478269Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"986e33f48d4d13ba","local-member-id":"a71e7bac075997","cluster-version":"3.5"}
{"level":"info","ts":"2024-08-18T18:47:15.478318Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-08-18T18:47:15.489302Z","caller":"embed/etcd.go:571","msg":"cmux::serve","address":"192.168.39.95:2380"}
{"level":"info","ts":"2024-08-18T18:47:16.789525Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"a71e7bac075997 is starting a new election at term 3"}
{"level":"info","ts":"2024-08-18T18:47:16.789583Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"a71e7bac075997 became pre-candidate at term 3"}
{"level":"info","ts":"2024-08-18T18:47:16.789631Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"a71e7bac075997 received MsgPreVoteResp from a71e7bac075997 at term 3"}
{"level":"info","ts":"2024-08-18T18:47:16.789645Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"a71e7bac075997 became candidate at term 4"}
{"level":"info","ts":"2024-08-18T18:47:16.789672Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"a71e7bac075997 received MsgVoteResp from a71e7bac075997 at term 4"}
{"level":"info","ts":"2024-08-18T18:47:16.789683Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"a71e7bac075997 became leader at term 4"}
{"level":"info","ts":"2024-08-18T18:47:16.789690Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: a71e7bac075997 elected leader a71e7bac075997 at term 4"}
{"level":"info","ts":"2024-08-18T18:47:16.795260Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"a71e7bac075997","local-member-attributes":"{Name:functional-771033 ClientURLs:[https://192.168.39.95:2379]}","request-path":"/0/members/a71e7bac075997/attributes","cluster-id":"986e33f48d4d13ba","publish-timeout":"7s"}
{"level":"info","ts":"2024-08-18T18:47:16.795298Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-08-18T18:47:16.795794Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-08-18T18:47:16.795893Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-08-18T18:47:16.795806Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-08-18T18:47:16.796558Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-08-18T18:47:16.797016Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-08-18T18:47:16.797549Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-08-18T18:47:16.798204Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.39.95:2379"}
==> etcd [592a9b17d86a] <==
==> kernel <==
18:48:30 up 3 min, 0 users, load average: 1.26, 0.93, 0.38
Linux functional-771033 5.10.207 #1 SMP Thu Aug 15 21:30:57 UTC 2024 x86_64 GNU/Linux
PRETTY_NAME="Buildroot 2023.02.9"
==> kube-apiserver [1df53b51ee39] <==
I0818 18:48:15.052315 1 crdregistration_controller.go:114] Starting crd-autoregister controller
I0818 18:48:15.052455 1 shared_informer.go:313] Waiting for caches to sync for crd-autoregister
I0818 18:48:15.123969 1 dynamic_cafile_content.go:160] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I0818 18:48:15.125139 1 dynamic_cafile_content.go:160] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I0818 18:48:15.201583 1 shared_informer.go:320] Caches are synced for *generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]
I0818 18:48:15.201623 1 policy_source.go:224] refreshing policies
I0818 18:48:15.235522 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
I0818 18:48:15.245135 1 apf_controller.go:382] Running API Priority and Fairness config worker
I0818 18:48:15.245170 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process
I0818 18:48:15.246471 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I0818 18:48:15.250525 1 cache.go:39] Caches are synced for LocalAvailability controller
I0818 18:48:15.250962 1 shared_informer.go:320] Caches are synced for cluster_authentication_trust_controller
I0818 18:48:15.251708 1 shared_informer.go:320] Caches are synced for configmaps
I0818 18:48:15.252948 1 cache.go:39] Caches are synced for RemoteAvailability controller
I0818 18:48:15.253466 1 shared_informer.go:320] Caches are synced for crd-autoregister
I0818 18:48:15.253708 1 aggregator.go:171] initial CRD sync complete...
I0818 18:48:15.253752 1 autoregister_controller.go:144] Starting autoregister controller
I0818 18:48:15.253770 1 cache.go:32] Waiting for caches to sync for autoregister controller
I0818 18:48:15.253789 1 cache.go:39] Caches are synced for autoregister controller
I0818 18:48:15.254818 1 handler_discovery.go:450] Starting ResourceDiscoveryManager
I0818 18:48:15.298292 1 shared_informer.go:320] Caches are synced for node_authorizer
I0818 18:48:16.050946 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
W0818 18:48:16.264604 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.39.95]
I0818 18:48:16.266283 1 controller.go:615] quota admission added evaluator for: endpoints
I0818 18:48:16.270748 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
==> kube-controller-manager [4854ca3a4bea] <==
E0818 18:48:11.433388 1 node_lifecycle_controller.go:720] "Failed while getting a Node to retry updating node health. Probably Node was deleted" logger="node-lifecycle-controller" node="functional-771033"
E0818 18:48:11.433467 1 node_lifecycle_controller.go:725] "Update health of Node from Controller error, Skipping - no pods will be evicted" err="Get \"https://192.168.39.95:8441/api/v1/nodes/functional-771033\": dial tcp 192.168.39.95:8441: connect: connection refused" logger="node-lifecycle-controller" node=""
E0818 18:48:15.148321 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Lease: unknown (get leases.coordination.k8s.io)" logger="UnhandledError"
E0818 18:48:15.148572 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ClusterRole: unknown (get clusterroles.rbac.authorization.k8s.io)" logger="UnhandledError"
E0818 18:48:15.148676 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ValidatingAdmissionPolicyBinding: unknown (get validatingadmissionpolicybindings.admissionregistration.k8s.io)" logger="UnhandledError"
E0818 18:48:15.148783 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ConfigMap: unknown (get configmaps)" logger="UnhandledError"
E0818 18:48:15.148855 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Role: unknown (get roles.rbac.authorization.k8s.io)" logger="UnhandledError"
E0818 18:48:15.148935 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PriorityClass: unknown (get priorityclasses.scheduling.k8s.io)" logger="UnhandledError"
E0818 18:48:15.149013 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: unknown (get csinodes.storage.k8s.io)" logger="UnhandledError"
E0818 18:48:15.149096 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ClusterRoleBinding: unknown (get clusterrolebindings.rbac.authorization.k8s.io)" logger="UnhandledError"
E0818 18:48:15.151216 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ControllerRevision: unknown (get controllerrevisions.apps)" logger="UnhandledError"
E0818 18:48:15.151340 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PriorityLevelConfiguration: unknown (get prioritylevelconfigurations.flowcontrol.apiserver.k8s.io)" logger="UnhandledError"
I0818 18:48:16.434718 1 node_lifecycle_controller.go:1232] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I0818 18:48:16.476250 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/kube-apiserver-functional-771033" err="Operation cannot be fulfilled on pods \"kube-apiserver-functional-771033\": StorageError: invalid object, Code: 4, Key: /registry/pods/kube-system/kube-apiserver-functional-771033, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 50144038-6f50-40ec-91d8-5c6157da045a, UID in object meta: 4186abeb-76d6-4bf5-beef-2994f20dcef1"
E0818 18:48:16.504706 1 node_lifecycle_controller.go:758] "Unhandled Error" err="unable to mark all pods NotReady on node functional-771033: Operation cannot be fulfilled on pods \"kube-apiserver-functional-771033\": StorageError: invalid object, Code: 4, Key: /registry/pods/kube-system/kube-apiserver-functional-771033, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 50144038-6f50-40ec-91d8-5c6157da045a, UID in object meta: 4186abeb-76d6-4bf5-beef-2994f20dcef1; queuing for retry" logger="UnhandledError"
I0818 18:48:16.506179 1 node_lifecycle_controller.go:1036] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
E0818 18:48:21.511148 1 node_lifecycle_controller.go:978] "Error updating node" err="Operation cannot be fulfilled on nodes \"functional-771033\": the object has been modified; please apply your changes to the latest version and try again" logger="node-lifecycle-controller" node="functional-771033"
I0818 18:48:21.538466 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/coredns-6f6b679f8f-jr2fb" err="Operation cannot be fulfilled on pods \"coredns-6f6b679f8f-jr2fb\": the object has been modified; please apply your changes to the latest version and try again"
I0818 18:48:21.543860 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/etcd-functional-771033" err="Operation cannot be fulfilled on pods \"etcd-functional-771033\": the object has been modified; please apply your changes to the latest version and try again"
I0818 18:48:21.547500 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/kube-apiserver-functional-771033" err="Operation cannot be fulfilled on pods \"kube-apiserver-functional-771033\": StorageError: invalid object, Code: 4, Key: /registry/pods/kube-system/kube-apiserver-functional-771033, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 50144038-6f50-40ec-91d8-5c6157da045a, UID in object meta: 4186abeb-76d6-4bf5-beef-2994f20dcef1"
I0818 18:48:21.552790 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/kube-controller-manager-functional-771033" err="Operation cannot be fulfilled on pods \"kube-controller-manager-functional-771033\": the object has been modified; please apply your changes to the latest version and try again"
I0818 18:48:21.557030 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/kube-proxy-f6krv" err="Operation cannot be fulfilled on pods \"kube-proxy-f6krv\": the object has been modified; please apply your changes to the latest version and try again"
I0818 18:48:21.563494 1 controller_utils.go:151] "Failed to update status for pod" logger="node-lifecycle-controller" pod="kube-system/kube-scheduler-functional-771033" err="Operation cannot be fulfilled on pods \"kube-scheduler-functional-771033\": the object has been modified; please apply your changes to the latest version and try again"
E0818 18:48:21.563782 1 node_lifecycle_controller.go:758] "Unhandled Error" err="unable to mark all pods NotReady on node functional-771033: [Operation cannot be fulfilled on pods \"coredns-6f6b679f8f-jr2fb\": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on pods \"etcd-functional-771033\": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on pods \"kube-apiserver-functional-771033\": StorageError: invalid object, Code: 4, Key: /registry/pods/kube-system/kube-apiserver-functional-771033, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 50144038-6f50-40ec-91d8-5c6157da045a, UID in object meta: 4186abeb-76d6-4bf5-beef-2994f20dcef1, Operation cannot be fulfilled on pods \"kube-controller-manager-functional-771033\": the object has been modified; please apply your changes to the latest version and try again, Operation
cannot be fulfilled on pods \"kube-proxy-f6krv\": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on pods \"kube-scheduler-functional-771033\": the object has been modified; please apply your changes to the latest version and try again]; queuing for retry" logger="UnhandledError"
I0818 18:48:26.565584 1 node_lifecycle_controller.go:1055] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
==> kube-controller-manager [c4de3f53a26d] <==
==> kube-proxy [5162d714630e] <==
^^^^^^^^^^^^^^^^^^^^^^^^
>
E0818 18:47:20.049484 1 proxier.go:734] "Error cleaning up nftables rules" err=<
could not run nftables command: /dev/stdin:1:1-25: Error: Could not process rule: Operation not supported
add table ip6 kube-proxy
^^^^^^^^^^^^^^^^^^^^^^^^^
>
I0818 18:47:20.059806 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.39.95"]
E0818 18:47:20.059920 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0818 18:47:20.117170 1 server_linux.go:146] "No iptables support for family" ipFamily="IPv6"
I0818 18:47:20.117235 1 server.go:245] "kube-proxy running in single-stack mode" ipFamily="IPv4"
I0818 18:47:20.117265 1 server_linux.go:169] "Using iptables Proxier"
I0818 18:47:20.127750 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0818 18:47:20.129332 1 server.go:483] "Version info" version="v1.31.0"
I0818 18:47:20.130721 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0818 18:47:20.134008 1 config.go:197] "Starting service config controller"
I0818 18:47:20.134047 1 shared_informer.go:313] Waiting for caches to sync for service config
I0818 18:47:20.134124 1 config.go:104] "Starting endpoint slice config controller"
I0818 18:47:20.134128 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0818 18:47:20.134415 1 config.go:326] "Starting node config controller"
I0818 18:47:20.134439 1 shared_informer.go:313] Waiting for caches to sync for node config
I0818 18:47:20.234857 1 shared_informer.go:320] Caches are synced for node config
I0818 18:47:20.235048 1 shared_informer.go:320] Caches are synced for service config
I0818 18:47:20.235106 1 shared_informer.go:320] Caches are synced for endpoint slice config
E0818 18:48:15.233453 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.EndpointSlice: unknown (get endpointslices.discovery.k8s.io)" logger="UnhandledError"
==> kube-proxy [8e0a450f68a4] <==
==> kube-scheduler [354afb0d0718] <==
I0818 18:47:16.066688 1 serving.go:386] Generated self-signed cert in-memory
W0818 18:47:18.085113 1 requestheader_controller.go:196] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA'
W0818 18:47:18.085153 1 authentication.go:370] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system"
W0818 18:47:18.085453 1 authentication.go:371] Continuing without authentication configuration. This may treat all requests as anonymous.
W0818 18:47:18.085802 1 authentication.go:372] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false
I0818 18:47:18.162988 1 server.go:167] "Starting Kubernetes Scheduler" version="v1.31.0"
I0818 18:47:18.163025 1 server.go:169] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0818 18:47:18.165858 1 configmap_cafile_content.go:205] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0818 18:47:18.165967 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0818 18:47:18.166388 1 secure_serving.go:213] Serving securely on 127.0.0.1:10259
I0818 18:47:18.166463 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I0818 18:47:18.267309 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
E0818 18:48:15.128685 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: unknown (get persistentvolumes)" logger="UnhandledError"
E0818 18:48:15.129146 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: unknown (get replicasets.apps)" logger="UnhandledError"
E0818 18:48:15.129397 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: unknown (get namespaces)" logger="UnhandledError"
==> kube-scheduler [e1a825e0ca44] <==
==> kubelet <==
Aug 18 18:48:04 functional-771033 kubelet[6990]: E0818 18:48:04.453245 6990 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: a6bf6cd3f233c984cd52bcff2a0a68b29dce520e47aa06f32f573e0f5b452c05" containerID="a6bf6cd3f233c984cd52bcff2a0a68b29dce520e47aa06f32f573e0f5b452c05"
Aug 18 18:48:04 functional-771033 kubelet[6990]: I0818 18:48:04.453278 6990 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"a6bf6cd3f233c984cd52bcff2a0a68b29dce520e47aa06f32f573e0f5b452c05"} err="failed to get container status \"a6bf6cd3f233c984cd52bcff2a0a68b29dce520e47aa06f32f573e0f5b452c05\": rpc error: code = Unknown desc = Error response from daemon: No such container: a6bf6cd3f233c984cd52bcff2a0a68b29dce520e47aa06f32f573e0f5b452c05"
Aug 18 18:48:05 functional-771033 kubelet[6990]: E0818 18:48:05.588219 6990 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://control-plane.minikube.internal:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-771033?timeout=10s\": dial tcp 192.168.39.95:8441: connect: connection refused" interval="7s"
Aug 18 18:48:05 functional-771033 kubelet[6990]: I0818 18:48:05.812866 6990 scope.go:117] "RemoveContainer" containerID="e552016113ce1766dc6af57b26ade7a78db21fafe0b4fbb31bed5694116effca"
Aug 18 18:48:05 functional-771033 kubelet[6990]: I0818 18:48:05.818722 6990 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16cc9ffa28d8b1489ccffe7a75276630" path="/var/lib/kubelet/pods/16cc9ffa28d8b1489ccffe7a75276630/volumes"
Aug 18 18:48:06 functional-771033 kubelet[6990]: I0818 18:48:06.440394 6990 scope.go:117] "RemoveContainer" containerID="e552016113ce1766dc6af57b26ade7a78db21fafe0b4fbb31bed5694116effca"
Aug 18 18:48:06 functional-771033 kubelet[6990]: I0818 18:48:06.440650 6990 scope.go:117] "RemoveContainer" containerID="448705e7b9ad526d3670cbbd5fa7edb60a61c1a7e47c95e8090cc8e7590bc712"
Aug 18 18:48:06 functional-771033 kubelet[6990]: E0818 18:48:06.440755 6990 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"storage-provisioner\" with CrashLoopBackOff: \"back-off 40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(01c74b7d-d168-47d2-8415-af0dcd45453e)\"" pod="kube-system/storage-provisioner" podUID="01c74b7d-d168-47d2-8415-af0dcd45453e"
Aug 18 18:48:06 functional-771033 kubelet[6990]: I0818 18:48:06.441559 6990 status_manager.go:851] "Failed to get status for pod" podUID="01c74b7d-d168-47d2-8415-af0dcd45453e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.39.95:8441: connect: connection refused"
Aug 18 18:48:12 functional-771033 kubelet[6990]: E0818 18:48:12.590374 6990 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://control-plane.minikube.internal:8441/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/functional-771033?timeout=10s\": dial tcp 192.168.39.95:8441: connect: connection refused" interval="7s"
Aug 18 18:48:12 functional-771033 kubelet[6990]: I0818 18:48:12.813423 6990 kubelet.go:1895] "Trying to delete pod" pod="kube-system/kube-apiserver-functional-771033" podUID="50144038-6f50-40ec-91d8-5c6157da045a"
Aug 18 18:48:12 functional-771033 kubelet[6990]: I0818 18:48:12.814273 6990 status_manager.go:851] "Failed to get status for pod" podUID="01c74b7d-d168-47d2-8415-af0dcd45453e" pod="kube-system/storage-provisioner" err="Get \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/storage-provisioner\": dial tcp 192.168.39.95:8441: connect: connection refused"
Aug 18 18:48:12 functional-771033 kubelet[6990]: E0818 18:48:12.814421 6990 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://control-plane.minikube.internal:8441/api/v1/namespaces/kube-system/pods/kube-apiserver-functional-771033\": dial tcp 192.168.39.95:8441: connect: connection refused" pod="kube-system/kube-apiserver-functional-771033"
Aug 18 18:48:13 functional-771033 kubelet[6990]: I0818 18:48:13.518155 6990 kubelet.go:1895] "Trying to delete pod" pod="kube-system/kube-apiserver-functional-771033" podUID="50144038-6f50-40ec-91d8-5c6157da045a"
Aug 18 18:48:13 functional-771033 kubelet[6990]: E0818 18:48:13.839969 6990 iptables.go:577] "Could not set up iptables canary" err=<
Aug 18 18:48:13 functional-771033 kubelet[6990]: error creating chain "KUBE-KUBELET-CANARY": exit status 3: Ignoring deprecated --wait-interval option.
Aug 18 18:48:13 functional-771033 kubelet[6990]: ip6tables v1.8.9 (legacy): can't initialize ip6tables table `nat': Table does not exist (do you need to insmod?)
Aug 18 18:48:13 functional-771033 kubelet[6990]: Perhaps ip6tables or your kernel needs to be upgraded.
Aug 18 18:48:13 functional-771033 kubelet[6990]: > table="nat" chain="KUBE-KUBELET-CANARY"
Aug 18 18:48:15 functional-771033 kubelet[6990]: I0818 18:48:15.273598 6990 kubelet.go:1900] "Deleted mirror pod because it is outdated" pod="kube-system/kube-apiserver-functional-771033"
Aug 18 18:48:15 functional-771033 kubelet[6990]: I0818 18:48:15.538733 6990 kubelet.go:1895] "Trying to delete pod" pod="kube-system/kube-apiserver-functional-771033" podUID="50144038-6f50-40ec-91d8-5c6157da045a"
Aug 18 18:48:18 functional-771033 kubelet[6990]: I0818 18:48:18.813044 6990 scope.go:117] "RemoveContainer" containerID="448705e7b9ad526d3670cbbd5fa7edb60a61c1a7e47c95e8090cc8e7590bc712"
Aug 18 18:48:18 functional-771033 kubelet[6990]: E0818 18:48:18.813253 6990 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"storage-provisioner\" with CrashLoopBackOff: \"back-off 40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(01c74b7d-d168-47d2-8415-af0dcd45453e)\"" pod="kube-system/storage-provisioner" podUID="01c74b7d-d168-47d2-8415-af0dcd45453e"
Aug 18 18:48:29 functional-771033 kubelet[6990]: I0818 18:48:29.813453 6990 scope.go:117] "RemoveContainer" containerID="448705e7b9ad526d3670cbbd5fa7edb60a61c1a7e47c95e8090cc8e7590bc712"
Aug 18 18:48:29 functional-771033 kubelet[6990]: E0818 18:48:29.813702 6990 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"storage-provisioner\" with CrashLoopBackOff: \"back-off 40s restarting failed container=storage-provisioner pod=storage-provisioner_kube-system(01c74b7d-d168-47d2-8415-af0dcd45453e)\"" pod="kube-system/storage-provisioner" podUID="01c74b7d-d168-47d2-8415-af0dcd45453e"
==> storage-provisioner [448705e7b9ad] <==
I0818 18:48:05.953141 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F0818 18:48:05.954879 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: connection refused
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p functional-771033 -n functional-771033
helpers_test.go:261: (dbg) Run: kubectl --context functional-771033 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestFunctional/serial/ComponentHealth FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestFunctional/serial/ComponentHealth (1.60s)