=== RUN TestMultiControlPlane/serial/RestartCluster
ha_test.go:562: (dbg) Run: out/minikube-linux-arm64 start -p ha-082404 --wait=true -v=7 --alsologtostderr --driver=docker --container-runtime=docker
E1216 19:55:14.740539 7569 cert_rotation.go:171] "Unhandled Error" err="key failed with : open /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/functional-690644/client.crt: no such file or directory" logger="UnhandledError"
ha_test.go:562: (dbg) Done: out/minikube-linux-arm64 start -p ha-082404 --wait=true -v=7 --alsologtostderr --driver=docker --container-runtime=docker: (1m44.646885839s)
ha_test.go:568: (dbg) Run: out/minikube-linux-arm64 -p ha-082404 status -v=7 --alsologtostderr
ha_test.go:586: (dbg) Run: kubectl get nodes
ha_test.go:591: expected 3 nodes to be Ready, got
-- stdout --
NAME            STATUS     ROLES           AGE     VERSION
ha-082404       Ready      control-plane   10m     v1.32.0
ha-082404-m02   Ready      control-plane   9m45s   v1.32.0
ha-082404-m04   NotReady   <none>          8m9s    v1.32.0
-- /stdout --
ha_test.go:594: (dbg) Run: kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'"
ha_test.go:599: expected 3 nodes Ready status to be True, got
-- stdout --
' True
True
Unknown
'
-- /stdout --
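The Unknown Ready condition for ha-082404-m04 means the node controller stopped receiving kubelet status from that worker after the restart. When reproducing this outside the test harness, two quick checks (a sketch assuming the same profile name and kubeconfig the test uses; kubectl describe is an extra diagnostic, not part of the test itself):
    kubectl describe node ha-082404-m04                                    # node conditions and last kubelet heartbeat
    out/minikube-linux-arm64 -p ha-082404 status -v=7 --alsologtostderr    # per-node host/kubelet/apiserver state
The status invocation mirrors the one run at ha_test.go:568 above.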
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestMultiControlPlane/serial/RestartCluster]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect ha-082404
helpers_test.go:235: (dbg) docker inspect ha-082404:
-- stdout --
[
{
"Id": "df79637e07d1fa9b770fdad3a3220b4d498aee0558c4946d136f873d151dccd1",
"Created": "2024-12-16T19:45:53.934864238Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 103886,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-12-16T19:54:50.589481723Z",
"FinishedAt": "2024-12-16T19:54:49.800833038Z"
},
"Image": "sha256:7cd263f59e19eeefdb79b99186c433854c2243e3d7fa2988b2d817cac7fc54f8",
"ResolvConfPath": "/var/lib/docker/containers/df79637e07d1fa9b770fdad3a3220b4d498aee0558c4946d136f873d151dccd1/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/df79637e07d1fa9b770fdad3a3220b4d498aee0558c4946d136f873d151dccd1/hostname",
"HostsPath": "/var/lib/docker/containers/df79637e07d1fa9b770fdad3a3220b4d498aee0558c4946d136f873d151dccd1/hosts",
"LogPath": "/var/lib/docker/containers/df79637e07d1fa9b770fdad3a3220b4d498aee0558c4946d136f873d151dccd1/df79637e07d1fa9b770fdad3a3220b4d498aee0558c4946d136f873d151dccd1-json.log",
"Name": "/ha-082404",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"ha-082404:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "ha-082404",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 2306867200,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 4613734400,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/bd50fb4651c36616cd0e4597826049bf118bf361421e50a1dc7bb417cc5c40e9-init/diff:/var/lib/docker/overlay2/acc364fe6cd4e3915e2c087c9731511b8036f6f5517ed637cb16c71fff260f76/diff",
"MergedDir": "/var/lib/docker/overlay2/bd50fb4651c36616cd0e4597826049bf118bf361421e50a1dc7bb417cc5c40e9/merged",
"UpperDir": "/var/lib/docker/overlay2/bd50fb4651c36616cd0e4597826049bf118bf361421e50a1dc7bb417cc5c40e9/diff",
"WorkDir": "/var/lib/docker/overlay2/bd50fb4651c36616cd0e4597826049bf118bf361421e50a1dc7bb417cc5c40e9/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "ha-082404",
"Source": "/var/lib/docker/volumes/ha-082404/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "ha-082404",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "ha-082404",
"name.minikube.sigs.k8s.io": "ha-082404",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "26e8215efe50033d482a3cf80230649afa6ad72069555c764461aa73d989da4b",
"SandboxKey": "/var/run/docker/netns/26e8215efe50",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32828"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32829"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32832"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32830"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32831"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"ha-082404": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "a15d316ef2180ac97e1a928fbbc2c912357b4f33526d08bdd6091d50fcb70614",
"EndpointID": "c1acd7b1247149870ecf0824a65b79b11e1c73a0672bd91763a9b5f05245e4ce",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"ha-082404",
"df79637e07d1"
]
}
}
}
}
]
-- /stdout --
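The handful of fields the post-mortem usually needs from this dump can be pulled with a Go template instead of the full JSON; a sketch, assuming the same container name (the harness does not run it in this form):
    docker inspect -f '{{.State.Status}} {{.State.StartedAt}} {{(index .NetworkSettings.Networks "ha-082404").IPAddress}}' ha-082404
For the output above this prints "running", the 2024-12-16T19:54:50Z start time, and 192.168.49.2, matching the State and Networks sections of the inspect JSON.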
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p ha-082404 -n ha-082404
helpers_test.go:244: <<< TestMultiControlPlane/serial/RestartCluster FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestMultiControlPlane/serial/RestartCluster]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p ha-082404 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p ha-082404 logs -n 25: (1.689215145s)
helpers_test.go:252: TestMultiControlPlane/serial/RestartCluster logs:
-- stdout --
==> Audit <==
|---------|----------------------------------------------------------------------------------|-----------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|----------------------------------------------------------------------------------|-----------|---------|---------|---------------------|---------------------|
| cp | ha-082404 cp ha-082404-m03:/home/docker/cp-test.txt | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04:/home/docker/cp-test_ha-082404-m03_ha-082404-m04.txt | | | | | |
| ssh | ha-082404 ssh -n | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m03 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-082404 ssh -n ha-082404-m04 sudo cat | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | /home/docker/cp-test_ha-082404-m03_ha-082404-m04.txt | | | | | |
| cp | ha-082404 cp testdata/cp-test.txt | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04:/home/docker/cp-test.txt | | | | | |
| ssh | ha-082404 ssh -n | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| cp | ha-082404 cp ha-082404-m04:/home/docker/cp-test.txt | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | /tmp/TestMultiControlPlaneserialCopyFile3791437405/001/cp-test_ha-082404-m04.txt | | | | | |
| ssh | ha-082404 ssh -n | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| cp | ha-082404 cp ha-082404-m04:/home/docker/cp-test.txt | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404:/home/docker/cp-test_ha-082404-m04_ha-082404.txt | | | | | |
| ssh | ha-082404 ssh -n | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-082404 ssh -n ha-082404 sudo cat | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | /home/docker/cp-test_ha-082404-m04_ha-082404.txt | | | | | |
| cp | ha-082404 cp ha-082404-m04:/home/docker/cp-test.txt | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m02:/home/docker/cp-test_ha-082404-m04_ha-082404-m02.txt | | | | | |
| ssh | ha-082404 ssh -n | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-082404 ssh -n ha-082404-m02 sudo cat | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | /home/docker/cp-test_ha-082404-m04_ha-082404-m02.txt | | | | | |
| cp | ha-082404 cp ha-082404-m04:/home/docker/cp-test.txt | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m03:/home/docker/cp-test_ha-082404-m04_ha-082404-m03.txt | | | | | |
| ssh | ha-082404 ssh -n | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | ha-082404-m04 sudo cat | | | | | |
| | /home/docker/cp-test.txt | | | | | |
| ssh | ha-082404 ssh -n ha-082404-m03 sudo cat | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:48 UTC |
| | /home/docker/cp-test_ha-082404-m04_ha-082404-m03.txt | | | | | |
| node | ha-082404 node stop m02 -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:48 UTC | 16 Dec 24 19:49 UTC |
| | --alsologtostderr | | | | | |
| node | ha-082404 node start m02 -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:49 UTC | 16 Dec 24 19:49 UTC |
| | --alsologtostderr | | | | | |
| node | list -p ha-082404 -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:49 UTC | |
| | --alsologtostderr | | | | | |
| stop | -p ha-082404 -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:49 UTC | 16 Dec 24 19:50 UTC |
| | --alsologtostderr | | | | | |
| start | -p ha-082404 --wait=true -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:50 UTC | 16 Dec 24 19:54 UTC |
| | --alsologtostderr | | | | | |
| node | list -p ha-082404 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:54 UTC | |
| node | ha-082404 node delete m03 -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:54 UTC | 16 Dec 24 19:54 UTC |
| | --alsologtostderr | | | | | |
| stop | ha-082404 stop -v=7 | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:54 UTC | 16 Dec 24 19:54 UTC |
| | --alsologtostderr | | | | | |
| start | -p ha-082404 --wait=true | ha-082404 | jenkins | v1.34.0 | 16 Dec 24 19:54 UTC | 16 Dec 24 19:56 UTC |
| | -v=7 --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
|---------|----------------------------------------------------------------------------------|-----------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/12/16 19:54:50
Running on machine: ip-172-31-29-130
Binary: Built with gc go1.23.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1216 19:54:50.235257 103685 out.go:345] Setting OutFile to fd 1 ...
I1216 19:54:50.235605 103685 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1216 19:54:50.235621 103685 out.go:358] Setting ErrFile to fd 2...
I1216 19:54:50.235627 103685 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1216 19:54:50.235898 103685 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20091-2258/.minikube/bin
I1216 19:54:50.236312 103685 out.go:352] Setting JSON to false
I1216 19:54:50.237210 103685 start.go:129] hostinfo: {"hostname":"ip-172-31-29-130","uptime":2236,"bootTime":1734376655,"procs":185,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1072-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"36adf542-ef4f-4e2d-a0c8-6868d1383ff9"}
I1216 19:54:50.237289 103685 start.go:139] virtualization:
I1216 19:54:50.240598 103685 out.go:177] * [ha-082404] minikube v1.34.0 on Ubuntu 20.04 (arm64)
I1216 19:54:50.244098 103685 out.go:177] - MINIKUBE_LOCATION=20091
I1216 19:54:50.244279 103685 notify.go:220] Checking for updates...
I1216 19:54:50.249718 103685 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1216 19:54:50.252408 103685 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20091-2258/kubeconfig
I1216 19:54:50.255139 103685 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20091-2258/.minikube
I1216 19:54:50.257744 103685 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I1216 19:54:50.260357 103685 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I1216 19:54:50.263576 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:54:50.264171 103685 driver.go:394] Setting default libvirt URI to qemu:///system
I1216 19:54:50.294096 103685 docker.go:123] docker version: linux-27.4.0:Docker Engine - Community
I1216 19:54:50.294229 103685 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1216 19:54:50.348245 103685 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:3 ContainersRunning:0 ContainersPaused:0 ContainersStopped:3 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:38 OomKillDisable:true NGoroutines:41 SystemTime:2024-12-16 19:54:50.339273471 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1072-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge
-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0]] Warnings:<nil>}}
I1216 19:54:50.348362 103685 docker.go:318] overlay module found
I1216 19:54:50.351393 103685 out.go:177] * Using the docker driver based on existing profile
I1216 19:54:50.353949 103685 start.go:297] selected driver: docker
I1216 19:54:50.353971 103685 start.go:901] validating driver "docker" against &{Name:ha-082404 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName
:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.49.5 Port:0 KubernetesVersion:v1.32.0 ContainerRuntime: ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false ku
beflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVM
netClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1216 19:54:50.354126 103685 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1216 19:54:50.354232 103685 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1216 19:54:50.407087 103685 info.go:266] docker info: {ID:U5VK:ZNT5:35M3:FHLW:Q7TL:ELFX:BNAG:AV4T:UD2H:SK5L:SEJV:SJJL Containers:3 ContainersRunning:0 ContainersPaused:0 ContainersStopped:3 Images:3 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:38 OomKillDisable:true NGoroutines:41 SystemTime:2024-12-16 19:54:50.397788268 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1072-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-29-130 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge
-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0]] Warnings:<nil>}}
I1216 19:54:50.407591 103685 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1216 19:54:50.407625 103685 cni.go:84] Creating CNI manager for ""
I1216 19:54:50.407672 103685 cni.go:136] multinode detected (3 nodes found), recommending kindnet
I1216 19:54:50.407726 103685 start.go:340] cluster config:
{Name:ha-082404 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerR
untime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.49.5 Port:0 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:f
alse nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs:
AutoPauseInterval:1m0s}
I1216 19:54:50.410682 103685 out.go:177] * Starting "ha-082404" primary control-plane node in "ha-082404" cluster
I1216 19:54:50.413209 103685 cache.go:121] Beginning downloading kic base image for docker with docker
I1216 19:54:50.415916 103685 out.go:177] * Pulling base image v0.0.45-1734029593-20090 ...
I1216 19:54:50.418524 103685 preload.go:131] Checking if preload exists for k8s version v1.32.0 and runtime docker
I1216 19:54:50.418582 103685 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20091-2258/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.0-docker-overlay2-arm64.tar.lz4
I1216 19:54:50.418597 103685 cache.go:56] Caching tarball of preloaded images
I1216 19:54:50.418702 103685 preload.go:172] Found /home/jenkins/minikube-integration/20091-2258/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I1216 19:54:50.418718 103685 cache.go:59] Finished verifying existence of preloaded tar for v1.32.0 on docker
I1216 19:54:50.418860 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:54:50.419128 103685 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon
I1216 19:54:50.438566 103685 image.go:98] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon, skipping pull
I1216 19:54:50.438589 103685 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 exists in daemon, skipping load
I1216 19:54:50.438609 103685 cache.go:194] Successfully downloaded all kic artifacts
I1216 19:54:50.438632 103685 start.go:360] acquireMachinesLock for ha-082404: {Name:mk4ec7695b5b4eab6f186b464ef40ca9938b783b Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1216 19:54:50.438694 103685 start.go:364] duration metric: took 44.372µs to acquireMachinesLock for "ha-082404"
I1216 19:54:50.438717 103685 start.go:96] Skipping create...Using existing machine configuration
I1216 19:54:50.438735 103685 fix.go:54] fixHost starting:
I1216 19:54:50.438987 103685 cli_runner.go:164] Run: docker container inspect ha-082404 --format={{.State.Status}}
I1216 19:54:50.455675 103685 fix.go:112] recreateIfNeeded on ha-082404: state=Stopped err=<nil>
W1216 19:54:50.455708 103685 fix.go:138] unexpected machine state, will restart: <nil>
I1216 19:54:50.458876 103685 out.go:177] * Restarting existing docker container for "ha-082404" ...
I1216 19:54:50.461554 103685 cli_runner.go:164] Run: docker start ha-082404
I1216 19:54:50.754291 103685 cli_runner.go:164] Run: docker container inspect ha-082404 --format={{.State.Status}}
I1216 19:54:50.778480 103685 kic.go:430] container "ha-082404" state is running.
I1216 19:54:50.781122 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404
I1216 19:54:50.807553 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:54:50.807902 103685 machine.go:93] provisionDockerMachine start ...
I1216 19:54:50.808019 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:50.835064 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:54:50.835550 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32828 <nil> <nil>}
I1216 19:54:50.835611 103685 main.go:141] libmachine: About to run SSH command:
hostname
I1216 19:54:50.836330 103685 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1216 19:54:53.981301 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-082404
I1216 19:54:53.981327 103685 ubuntu.go:169] provisioning hostname "ha-082404"
I1216 19:54:53.981391 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:53.998828 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:54:53.999082 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32828 <nil> <nil>}
I1216 19:54:53.999098 103685 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-082404 && echo "ha-082404" | sudo tee /etc/hostname
I1216 19:54:54.162240 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-082404
I1216 19:54:54.162318 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:54.181153 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:54:54.181400 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32828 <nil> <nil>}
I1216 19:54:54.181422 103685 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-082404' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-082404/g' /etc/hosts;
else
echo '127.0.1.1 ha-082404' | sudo tee -a /etc/hosts;
fi
fi
I1216 19:54:54.326081 103685 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1216 19:54:54.326112 103685 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20091-2258/.minikube CaCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20091-2258/.minikube}
I1216 19:54:54.326154 103685 ubuntu.go:177] setting up certificates
I1216 19:54:54.326164 103685 provision.go:84] configureAuth start
I1216 19:54:54.326249 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404
I1216 19:54:54.345500 103685 provision.go:143] copyHostCerts
I1216 19:54:54.345555 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem
I1216 19:54:54.345602 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem, removing ...
I1216 19:54:54.345612 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem
I1216 19:54:54.345709 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem (1675 bytes)
I1216 19:54:54.345860 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem
I1216 19:54:54.345896 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem, removing ...
I1216 19:54:54.345907 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem
I1216 19:54:54.345959 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem (1082 bytes)
I1216 19:54:54.346016 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem
I1216 19:54:54.346037 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem, removing ...
I1216 19:54:54.346047 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem
I1216 19:54:54.346077 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem (1123 bytes)
I1216 19:54:54.346147 103685 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem org=jenkins.ha-082404 san=[127.0.0.1 192.168.49.2 ha-082404 localhost minikube]
I1216 19:54:54.904241 103685 provision.go:177] copyRemoteCerts
I1216 19:54:54.904315 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1216 19:54:54.904365 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:54.921108 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32828 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404/id_rsa Username:docker}
I1216 19:54:55.030019 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I1216 19:54:55.030090 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1216 19:54:55.057253 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem -> /etc/docker/server.pem
I1216 19:54:55.057324 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem --> /etc/docker/server.pem (1200 bytes)
I1216 19:54:55.082359 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I1216 19:54:55.082445 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1216 19:54:55.108687 103685 provision.go:87] duration metric: took 782.507179ms to configureAuth
I1216 19:54:55.108722 103685 ubuntu.go:193] setting minikube options for container-runtime
I1216 19:54:55.109007 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:54:55.109078 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.126596 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:54:55.126855 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32828 <nil> <nil>}
I1216 19:54:55.126871 103685 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1216 19:54:55.274532 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I1216 19:54:55.274554 103685 ubuntu.go:71] root file system type: overlay
I1216 19:54:55.274673 103685 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1216 19:54:55.274763 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.294733 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:54:55.294984 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32828 <nil> <nil>}
I1216 19:54:55.295071 103685 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1216 19:54:55.454750 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I1216 19:54:55.454860 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.472685 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:54:55.472946 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32828 <nil> <nil>}
I1216 19:54:55.472971 103685 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1216 19:54:55.622904 103685 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1216 19:54:55.622929 103685 machine.go:96] duration metric: took 4.814982156s to provisionDockerMachine
I1216 19:54:55.622941 103685 start.go:293] postStartSetup for "ha-082404" (driver="docker")
I1216 19:54:55.622970 103685 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1216 19:54:55.623046 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1216 19:54:55.623097 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.641060 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32828 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404/id_rsa Username:docker}
I1216 19:54:55.742856 103685 ssh_runner.go:195] Run: cat /etc/os-release
I1216 19:54:55.745983 103685 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1216 19:54:55.746022 103685 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1216 19:54:55.746032 103685 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1216 19:54:55.746039 103685 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1216 19:54:55.746050 103685 filesync.go:126] Scanning /home/jenkins/minikube-integration/20091-2258/.minikube/addons for local assets ...
I1216 19:54:55.746103 103685 filesync.go:126] Scanning /home/jenkins/minikube-integration/20091-2258/.minikube/files for local assets ...
I1216 19:54:55.746185 103685 filesync.go:149] local asset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> 75692.pem in /etc/ssl/certs
I1216 19:54:55.746197 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> /etc/ssl/certs/75692.pem
I1216 19:54:55.746340 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1216 19:54:55.755138 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem --> /etc/ssl/certs/75692.pem (1708 bytes)
I1216 19:54:55.779124 103685 start.go:296] duration metric: took 156.167284ms for postStartSetup
I1216 19:54:55.779250 103685 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1216 19:54:55.779325 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.795868 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32828 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404/id_rsa Username:docker}
I1216 19:54:55.895240 103685 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1216 19:54:55.900217 103685 fix.go:56] duration metric: took 5.461483028s for fixHost
I1216 19:54:55.900247 103685 start.go:83] releasing machines lock for "ha-082404", held for 5.461540766s
I1216 19:54:55.900324 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404
I1216 19:54:55.918466 103685 ssh_runner.go:195] Run: cat /version.json
I1216 19:54:55.918504 103685 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1216 19:54:55.918519 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.918576 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:54:55.942605 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32828 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404/id_rsa Username:docker}
I1216 19:54:55.952264 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32828 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404/id_rsa Username:docker}
I1216 19:54:56.041667 103685 ssh_runner.go:195] Run: systemctl --version
I1216 19:54:56.181578 103685 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1216 19:54:56.186034 103685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I1216 19:54:56.205951 103685 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I1216 19:54:56.206045 103685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1216 19:54:56.216277 103685 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1216 19:54:56.216345 103685 start.go:495] detecting cgroup driver to use...
I1216 19:54:56.216385 103685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1216 19:54:56.216485 103685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1216 19:54:56.232849 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I1216 19:54:56.243392 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1216 19:54:56.252853 103685 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1216 19:54:56.252977 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1216 19:54:56.262606 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1216 19:54:56.272430 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1216 19:54:56.282714 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1216 19:54:56.292801 103685 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1216 19:54:56.302054 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1216 19:54:56.312528 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1216 19:54:56.323188 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1216 19:54:56.333215 103685 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1216 19:54:56.342029 103685 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1216 19:54:56.351817 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:54:56.438998 103685 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1216 19:54:56.546269 103685 start.go:495] detecting cgroup driver to use...
I1216 19:54:56.546316 103685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1216 19:54:56.546367 103685 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1216 19:54:56.558872 103685 cruntime.go:279] skipping containerd shutdown because we are bound to it
I1216 19:54:56.559013 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1216 19:54:56.575737 103685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1216 19:54:56.592717 103685 ssh_runner.go:195] Run: which cri-dockerd
I1216 19:54:56.596523 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1216 19:54:56.605046 103685 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I1216 19:54:56.626911 103685 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1216 19:54:56.735467 103685 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1216 19:54:56.828873 103685 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I1216 19:54:56.828999 103685 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1216 19:54:56.847620 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:54:56.953368 103685 ssh_runner.go:195] Run: sudo systemctl restart docker
I1216 19:54:57.583024 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1216 19:54:57.594520 103685 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1216 19:54:57.607339 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1216 19:54:57.618944 103685 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1216 19:54:57.698960 103685 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1216 19:54:57.787136 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:54:57.873932 103685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1216 19:54:57.887773 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1216 19:54:57.899001 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:54:57.987177 103685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1216 19:54:58.078665 103685 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1216 19:54:58.078785 103685 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1216 19:54:58.083610 103685 start.go:563] Will wait 60s for crictl version
I1216 19:54:58.083728 103685 ssh_runner.go:195] Run: which crictl
I1216 19:54:58.087553 103685 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1216 19:54:58.143003 103685 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.4.0
RuntimeApiVersion: v1
I1216 19:54:58.143098 103685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1216 19:54:58.166668 103685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1216 19:54:58.192791 103685 out.go:235] * Preparing Kubernetes v1.32.0 on Docker 27.4.0 ...
I1216 19:54:58.192924 103685 cli_runner.go:164] Run: docker network inspect ha-082404 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1216 19:54:58.208935 103685 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1216 19:54:58.212582 103685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1216 19:54:58.223683 103685 kubeadm.go:883] updating cluster {Name:ha-082404 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APISe
rverNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.49.5 Port:0 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:fals
e kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPa
th: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1216 19:54:58.223844 103685 preload.go:131] Checking if preload exists for k8s version v1.32.0 and runtime docker
I1216 19:54:58.223906 103685 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1216 19:54:58.243738 103685 docker.go:689] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.32.0
registry.k8s.io/kube-scheduler:v1.32.0
registry.k8s.io/kube-controller-manager:v1.32.0
registry.k8s.io/kube-proxy:v1.32.0
ghcr.io/kube-vip/kube-vip:v0.8.7
kindest/kindnetd:v20241108-5c6d2daf
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28
-- /stdout --
I1216 19:54:58.243762 103685 docker.go:619] Images already preloaded, skipping extraction
I1216 19:54:58.243828 103685 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I1216 19:54:58.263266 103685 docker.go:689] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.32.0
registry.k8s.io/kube-controller-manager:v1.32.0
registry.k8s.io/kube-scheduler:v1.32.0
registry.k8s.io/kube-proxy:v1.32.0
ghcr.io/kube-vip/kube-vip:v0.8.7
kindest/kindnetd:v20241108-5c6d2daf
registry.k8s.io/etcd:3.5.16-0
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
gcr.io/k8s-minikube/busybox:1.28
-- /stdout --
I1216 19:54:58.263292 103685 cache_images.go:84] Images are preloaded, skipping loading
I1216 19:54:58.263303 103685 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.32.0 docker true true} ...
I1216 19:54:58.263406 103685 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-082404 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
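The ExecStart override above is installed as a systemd drop-in (the 10-kubeadm.conf scp'd further down); the merged unit kubelet actually runs with can be inspected inside the node:
  sudo systemctl cat kubelet                        # base unit plus kubelet.service.d/10-kubeadm.conf
  sudo systemctl show kubelet --property=ExecStart  # the flattened command line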
I1216 19:54:58.263467 103685 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I1216 19:54:58.318816 103685 cni.go:84] Creating CNI manager for ""
I1216 19:54:58.318844 103685 cni.go:136] multinode detected (3 nodes found), recommending kindnet
I1216 19:54:58.318856 103685 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I1216 19:54:58.318880 103685 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.32.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:ha-082404 NodeName:ha-082404 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/ma
nifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1216 19:54:58.319014 103685 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "ha-082404"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.32.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
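The rendered config above is later written to /var/tmp/minikube/kubeadm.yaml.new (see the scp below); as a sketch, assuming kubeadm is available under the bundled binaries path and supports the `config validate` subcommand of recent releases, it can be checked against the v1beta4 schema with:
  sudo /var/lib/minikube/binaries/v1.32.0/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml.new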
I1216 19:54:58.319030 103685 kube-vip.go:115] generating kube-vip config ...
I1216 19:54:58.319079 103685 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I1216 19:54:58.330966 103685 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
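ip_vs is only needed for kube-vip's optional control-plane load-balancing; without it the VIP is still advertised via ARP. Whether the modules can be made available depends on the host kernel (a sketch, run on the host driving the docker nodes):
  lsmod | grep ip_vs || sudo modprobe ip_vs   # load the module if the kernel build ships it
  lsmod | grep ip_vs                          # confirm ip_vs (and its scheduler modules) are present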
I1216 19:54:58.331112 103685 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "8443"
- name: vip_nodename
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: vip_interface
value: eth0
- name: vip_cidr
value: "32"
- name: dns_mode
value: first
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_leaderelection
value: "true"
- name: vip_leasename
value: plndr-cp-lock
- name: vip_leaseduration
value: "5"
- name: vip_renewdeadline
value: "3"
- name: vip_retryperiod
value: "1"
- name: address
value: 192.168.49.254
- name: prometheus_server
value: :2112
image: ghcr.io/kube-vip/kube-vip:v0.8.7
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
hostNetwork: true
volumes:
- hostPath:
path: "/etc/kubernetes/admin.conf"
name: kubeconfig
status: {}
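Once the control plane is back, the static pod above should bind the HA VIP 192.168.49.254 on eth0 of whichever control-plane node currently holds the plndr-cp-lock lease (a sketch; container names assumed from this profile):
  kubectl -n kube-system get pods -o wide | grep kube-vip
  docker exec ha-082404 ip addr show eth0 | grep 192.168.49.254   # present only on the current leader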
I1216 19:54:58.331171 103685 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.0
I1216 19:54:58.340122 103685 binaries.go:44] Found k8s binaries, skipping transfer
I1216 19:54:58.340198 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube /etc/kubernetes/manifests
I1216 19:54:58.348925 103685 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (308 bytes)
I1216 19:54:58.367736 103685 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1216 19:54:58.385764 103685 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2287 bytes)
I1216 19:54:58.404015 103685 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I1216 19:54:58.422355 103685 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I1216 19:54:58.425640 103685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1216 19:54:58.436197 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:54:58.515115 103685 ssh_runner.go:195] Run: sudo systemctl start kubelet
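A quick way to confirm the control-plane hosts entry and the kubelet start took effect inside the node (a sketch using docker exec against the node container):
  docker exec ha-082404 getent hosts control-plane.minikube.internal   # expect 192.168.49.254
  docker exec ha-082404 systemctl is-active kubelet                    # expect: active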
I1216 19:54:58.529263 103685 certs.go:68] Setting up /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404 for IP: 192.168.49.2
I1216 19:54:58.529288 103685 certs.go:194] generating shared ca certs ...
I1216 19:54:58.529304 103685 certs.go:226] acquiring lock for ca certs: {Name:mk61ac4ce13eccd2c732f8ba869cb043f9f7a744 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:54:58.529448 103685 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key
I1216 19:54:58.529492 103685 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key
I1216 19:54:58.529504 103685 certs.go:256] generating profile certs ...
I1216 19:54:58.529580 103685 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.key
I1216 19:54:58.529611 103685 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key.17c43a0e
I1216 19:54:58.529635 103685 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt.17c43a0e with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2 192.168.49.3 192.168.49.254]
I1216 19:54:59.106230 103685 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt.17c43a0e ...
I1216 19:54:59.106312 103685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt.17c43a0e: {Name:mkb5077c176c74589f525fc61df79a62d49e81bd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:54:59.106542 103685 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key.17c43a0e ...
I1216 19:54:59.106585 103685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key.17c43a0e: {Name:mk80e1751f9aa9d1369db3bc6fa2413f9cb2e303 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:54:59.106716 103685 certs.go:381] copying /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt.17c43a0e -> /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt
I1216 19:54:59.106904 103685 certs.go:385] copying /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key.17c43a0e -> /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key
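The SAN list used above (the 192.168.49.254 VIP plus both control-plane IPs) is what lets a single apiserver certificate serve every endpoint; it can be read back from the freshly written cert on the host:
  openssl x509 -noout -text -in /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt | grep -A1 'Subject Alternative Name'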
I1216 19:54:59.107087 103685 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.key
I1216 19:54:59.107122 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I1216 19:54:59.107155 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I1216 19:54:59.107197 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I1216 19:54:59.107233 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I1216 19:54:59.107263 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I1216 19:54:59.107305 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I1216 19:54:59.107342 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I1216 19:54:59.107373 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I1216 19:54:59.107459 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem (1338 bytes)
W1216 19:54:59.107517 103685 certs.go:480] ignoring /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569_empty.pem, impossibly tiny 0 bytes
I1216 19:54:59.107543 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem (1679 bytes)
I1216 19:54:59.107596 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem (1082 bytes)
I1216 19:54:59.107646 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem (1123 bytes)
I1216 19:54:59.107708 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem (1675 bytes)
I1216 19:54:59.107813 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem (1708 bytes)
I1216 19:54:59.107882 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> /usr/share/ca-certificates/75692.pem
I1216 19:54:59.107925 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I1216 19:54:59.107957 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem -> /usr/share/ca-certificates/7569.pem
I1216 19:54:59.108614 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1216 19:54:59.140083 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1216 19:54:59.172974 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1216 19:54:59.205651 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1216 19:54:59.237280 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1216 19:54:59.266686 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1216 19:54:59.291319 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1216 19:54:59.315312 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1216 19:54:59.338877 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem --> /usr/share/ca-certificates/75692.pem (1708 bytes)
I1216 19:54:59.362955 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1216 19:54:59.387791 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem --> /usr/share/ca-certificates/7569.pem (1338 bytes)
I1216 19:54:59.412062 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1216 19:54:59.429383 103685 ssh_runner.go:195] Run: openssl version
I1216 19:54:59.434899 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1216 19:54:59.444083 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1216 19:54:59.447496 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 16 19:35 /usr/share/ca-certificates/minikubeCA.pem
I1216 19:54:59.447565 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1216 19:54:59.454415 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1216 19:54:59.463276 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7569.pem && ln -fs /usr/share/ca-certificates/7569.pem /etc/ssl/certs/7569.pem"
I1216 19:54:59.472588 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7569.pem
I1216 19:54:59.476401 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 16 19:42 /usr/share/ca-certificates/7569.pem
I1216 19:54:59.476467 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7569.pem
I1216 19:54:59.483706 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/7569.pem /etc/ssl/certs/51391683.0"
I1216 19:54:59.492978 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/75692.pem && ln -fs /usr/share/ca-certificates/75692.pem /etc/ssl/certs/75692.pem"
I1216 19:54:59.502425 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/75692.pem
I1216 19:54:59.505794 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 16 19:42 /usr/share/ca-certificates/75692.pem
I1216 19:54:59.505929 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/75692.pem
I1216 19:54:59.513172 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/75692.pem /etc/ssl/certs/3ec20f2e.0"
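The <hash>.0 symlink names above are OpenSSL subject hashes, which is how the system trust store indexes CA certificates; the hash printed for a given PEM must match the link name, e.g. inside the node:
  openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # prints b5213941
  ls -l /etc/ssl/certs/b5213941.0                                           # -> minikubeCA.pem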
I1216 19:54:59.522065 103685 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1216 19:54:59.525439 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1216 19:54:59.532467 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1216 19:54:59.540038 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1216 19:54:59.546757 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1216 19:54:59.553560 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1216 19:54:59.560388 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
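Each -checkend 86400 call exits non-zero if the certificate expires within the next 86400 seconds (24 hours), which is what would trigger regeneration on restart; the same check can be run manually inside the node:
  sudo openssl x509 -noout -checkend 86400 -in /var/lib/minikube/certs/apiserver.crt \
    && echo 'valid for >24h' || echo 'expires within 24h'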
I1216 19:54:59.567458 103685 kubeadm.go:392] StartCluster: {Name:ha-082404 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:2200 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServe
rNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true} {Name:m04 IP:192.168.49.5 Port:0 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:false Worker:true}] Addons:map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false k
ubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:true ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath:
SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1216 19:54:59.567640 103685 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I1216 19:54:59.586026 103685 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1216 19:54:59.594797 103685 kubeadm.go:408] found existing configuration files, will attempt cluster restart
I1216 19:54:59.594817 103685 kubeadm.go:593] restartPrimaryControlPlane start ...
I1216 19:54:59.594887 103685 ssh_runner.go:195] Run: sudo test -d /data/minikube
I1216 19:54:59.603257 103685 kubeadm.go:130] /data/minikube skipping compat symlinks: sudo test -d /data/minikube: Process exited with status 1
stdout:
stderr:
I1216 19:54:59.603684 103685 kubeconfig.go:47] verify endpoint returned: get endpoint: "ha-082404" does not appear in /home/jenkins/minikube-integration/20091-2258/kubeconfig
I1216 19:54:59.603795 103685 kubeconfig.go:62] /home/jenkins/minikube-integration/20091-2258/kubeconfig needs updating (will repair): [kubeconfig missing "ha-082404" cluster setting kubeconfig missing "ha-082404" context setting]
I1216 19:54:59.604067 103685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20091-2258/kubeconfig: {Name:mka70734b2114420160cdb9aedbb0d97125ea129 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:54:59.604463 103685 loader.go:395] Config loaded from file: /home/jenkins/minikube-integration/20091-2258/kubeconfig
I1216 19:54:59.604712 103685 kapi.go:59] client config for ha-082404: &rest.Config{Host:"https://192.168.49.2:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.crt", KeyFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.key", CAFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)}, Us
erAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x1eafe20), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
I1216 19:54:59.605356 103685 ssh_runner.go:195] Run: sudo diff -u /var/tmp/minikube/kubeadm.yaml /var/tmp/minikube/kubeadm.yaml.new
I1216 19:54:59.605430 103685 cert_rotation.go:140] Starting client certificate rotation controller
I1216 19:54:59.614078 103685 kubeadm.go:630] The running cluster does not require reconfiguration: 192.168.49.2
I1216 19:54:59.614104 103685 kubeadm.go:597] duration metric: took 19.280385ms to restartPrimaryControlPlane
I1216 19:54:59.614131 103685 kubeadm.go:394] duration metric: took 46.681301ms to StartCluster
I1216 19:54:59.614151 103685 settings.go:142] acquiring lock: {Name:mkf2c060c99b8151a60e25cdfc7df7912c0c88fe Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:54:59.614236 103685 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20091-2258/kubeconfig
I1216 19:54:59.614847 103685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20091-2258/kubeconfig: {Name:mka70734b2114420160cdb9aedbb0d97125ea129 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
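After this repair the profile's cluster and context exist in the kubeconfig again; that can be verified directly (a sketch using the kubeconfig path from this run):
  kubectl config get-contexts --kubeconfig /home/jenkins/minikube-integration/20091-2258/kubeconfig
  kubectl cluster-info --context ha-082404 --kubeconfig /home/jenkins/minikube-integration/20091-2258/kubeconfig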
I1216 19:54:59.615081 103685 start.go:233] HA (multi-control plane) cluster: will skip waiting for primary control-plane node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I1216 19:54:59.615108 103685 start.go:241] waiting for startup goroutines ...
I1216 19:54:59.615116 103685 addons.go:507] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:false efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:false storage-provisioner-gluster:false storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false]
I1216 19:54:59.615382 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:54:59.619643 103685 out.go:177] * Enabled addons:
I1216 19:54:59.622371 103685 addons.go:510] duration metric: took 7.248467ms for enable addons: enabled=[]
I1216 19:54:59.622413 103685 start.go:246] waiting for cluster config update ...
I1216 19:54:59.622423 103685 start.go:255] writing updated cluster config ...
I1216 19:54:59.625414 103685 out.go:201]
I1216 19:54:59.628384 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:54:59.628496 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:54:59.631440 103685 out.go:177] * Starting "ha-082404-m02" control-plane node in "ha-082404" cluster
I1216 19:54:59.634101 103685 cache.go:121] Beginning downloading kic base image for docker with docker
I1216 19:54:59.636722 103685 out.go:177] * Pulling base image v0.0.45-1734029593-20090 ...
I1216 19:54:59.639322 103685 preload.go:131] Checking if preload exists for k8s version v1.32.0 and runtime docker
I1216 19:54:59.639344 103685 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon
I1216 19:54:59.639347 103685 cache.go:56] Caching tarball of preloaded images
I1216 19:54:59.639521 103685 preload.go:172] Found /home/jenkins/minikube-integration/20091-2258/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I1216 19:54:59.639532 103685 cache.go:59] Finished verifying existence of preloaded tar for v1.32.0 on docker
I1216 19:54:59.639667 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:54:59.666296 103685 image.go:98] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon, skipping pull
I1216 19:54:59.666321 103685 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 exists in daemon, skipping load
I1216 19:54:59.666339 103685 cache.go:194] Successfully downloaded all kic artifacts
I1216 19:54:59.666364 103685 start.go:360] acquireMachinesLock for ha-082404-m02: {Name:mk30a416a7c89b14eeb36c6dcc0c87eda00f817a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1216 19:54:59.666459 103685 start.go:364] duration metric: took 74.854µs to acquireMachinesLock for "ha-082404-m02"
I1216 19:54:59.666488 103685 start.go:96] Skipping create...Using existing machine configuration
I1216 19:54:59.666500 103685 fix.go:54] fixHost starting: m02
I1216 19:54:59.666795 103685 cli_runner.go:164] Run: docker container inspect ha-082404-m02 --format={{.State.Status}}
I1216 19:54:59.683448 103685 fix.go:112] recreateIfNeeded on ha-082404-m02: state=Stopped err=<nil>
W1216 19:54:59.683475 103685 fix.go:138] unexpected machine state, will restart: <nil>
I1216 19:54:59.686569 103685 out.go:177] * Restarting existing docker container for "ha-082404-m02" ...
I1216 19:54:59.689312 103685 cli_runner.go:164] Run: docker start ha-082404-m02
I1216 19:54:59.992531 103685 cli_runner.go:164] Run: docker container inspect ha-082404-m02 --format={{.State.Status}}
I1216 19:55:00.038531 103685 kic.go:430] container "ha-082404-m02" state is running.
I1216 19:55:00.039194 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404-m02
I1216 19:55:00.088761 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:55:00.089057 103685 machine.go:93] provisionDockerMachine start ...
I1216 19:55:00.089127 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:00.136382 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:55:00.137258 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32833 <nil> <nil>}
I1216 19:55:00.137292 103685 main.go:141] libmachine: About to run SSH command:
hostname
I1216 19:55:00.139151 103685 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:40988->127.0.0.1:32833: read: connection reset by peer
I1216 19:55:03.421424 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-082404-m02
I1216 19:55:03.421452 103685 ubuntu.go:169] provisioning hostname "ha-082404-m02"
I1216 19:55:03.421523 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:03.458046 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:55:03.458300 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32833 <nil> <nil>}
I1216 19:55:03.458319 103685 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-082404-m02 && echo "ha-082404-m02" | sudo tee /etc/hostname
I1216 19:55:03.670477 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-082404-m02
I1216 19:55:03.670576 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:03.703537 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:55:03.703797 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32833 <nil> <nil>}
I1216 19:55:03.703820 103685 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-082404-m02' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-082404-m02/g' /etc/hosts;
else
echo '127.0.1.1 ha-082404-m02' | sudo tee -a /etc/hosts;
fi
fi
I1216 19:55:03.910025 103685 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1216 19:55:03.910097 103685 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20091-2258/.minikube CaCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20091-2258/.minikube}
I1216 19:55:03.910150 103685 ubuntu.go:177] setting up certificates
I1216 19:55:03.910174 103685 provision.go:84] configureAuth start
I1216 19:55:03.910268 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404-m02
I1216 19:55:03.940499 103685 provision.go:143] copyHostCerts
I1216 19:55:03.940541 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem
I1216 19:55:03.940575 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem, removing ...
I1216 19:55:03.940582 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem
I1216 19:55:03.940661 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem (1082 bytes)
I1216 19:55:03.940740 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem
I1216 19:55:03.940758 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem, removing ...
I1216 19:55:03.940762 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem
I1216 19:55:03.940789 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem (1123 bytes)
I1216 19:55:03.940831 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem
I1216 19:55:03.940847 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem, removing ...
I1216 19:55:03.940851 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem
I1216 19:55:03.940874 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem (1675 bytes)
I1216 19:55:03.940916 103685 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem org=jenkins.ha-082404-m02 san=[127.0.0.1 192.168.49.3 ha-082404-m02 localhost minikube]
I1216 19:55:04.776804 103685 provision.go:177] copyRemoteCerts
I1216 19:55:04.776962 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1216 19:55:04.777031 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:04.814135 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32833 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m02/id_rsa Username:docker}
I1216 19:55:04.953594 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem -> /etc/docker/server.pem
I1216 19:55:04.953659 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I1216 19:55:05.047654 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I1216 19:55:05.047724 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1216 19:55:05.145481 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I1216 19:55:05.145553 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1216 19:55:05.240923 103685 provision.go:87] duration metric: took 1.330722824s to configureAuth
I1216 19:55:05.240989 103685 ubuntu.go:193] setting minikube options for container-runtime
I1216 19:55:05.241247 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:55:05.241328 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:05.277391 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:55:05.277630 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32833 <nil> <nil>}
I1216 19:55:05.277648 103685 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1216 19:55:05.529057 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I1216 19:55:05.529093 103685 ubuntu.go:71] root file system type: overlay
I1216 19:55:05.529256 103685 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1216 19:55:05.529336 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:05.557743 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:55:05.558030 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32833 <nil> <nil>}
I1216 19:55:05.558124 103685 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment="NO_PROXY=192.168.49.2"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1216 19:55:05.918617 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment=NO_PROXY=192.168.49.2
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I1216 19:55:05.918721 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:05.955939 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:55:05.956182 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32833 <nil> <nil>}
I1216 19:55:05.956206 103685 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1216 19:55:06.272638 103685 main.go:141] libmachine: SSH cmd err, output: <nil>:
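Whether or not the diff-or-replace above actually swapped the unit in, the effective docker.service on m02 (including the NO_PROXY= environment set in the generated unit) can be confirmed from the host:
  docker exec ha-082404-m02 systemctl show docker --property=Environment   # expect NO_PROXY=192.168.49.2
  docker exec ha-082404-m02 systemctl is-active docker                     # expect: active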
I1216 19:55:06.272666 103685 machine.go:96] duration metric: took 6.183599134s to provisionDockerMachine
I1216 19:55:06.272688 103685 start.go:293] postStartSetup for "ha-082404-m02" (driver="docker")
I1216 19:55:06.272702 103685 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1216 19:55:06.272812 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1216 19:55:06.272870 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:06.302083 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32833 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m02/id_rsa Username:docker}
I1216 19:55:06.455395 103685 ssh_runner.go:195] Run: cat /etc/os-release
I1216 19:55:06.468222 103685 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1216 19:55:06.468258 103685 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1216 19:55:06.468269 103685 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1216 19:55:06.468276 103685 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1216 19:55:06.468286 103685 filesync.go:126] Scanning /home/jenkins/minikube-integration/20091-2258/.minikube/addons for local assets ...
I1216 19:55:06.468348 103685 filesync.go:126] Scanning /home/jenkins/minikube-integration/20091-2258/.minikube/files for local assets ...
I1216 19:55:06.468419 103685 filesync.go:149] local asset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> 75692.pem in /etc/ssl/certs
I1216 19:55:06.468426 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> /etc/ssl/certs/75692.pem
I1216 19:55:06.468566 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1216 19:55:06.515880 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem --> /etc/ssl/certs/75692.pem (1708 bytes)
I1216 19:55:06.621375 103685 start.go:296] duration metric: took 348.670218ms for postStartSetup
I1216 19:55:06.621521 103685 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1216 19:55:06.621585 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:06.651491 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32833 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m02/id_rsa Username:docker}
I1216 19:55:06.870190 103685 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1216 19:55:06.882948 103685 fix.go:56] duration metric: took 7.216431829s for fixHost
I1216 19:55:06.882979 103685 start.go:83] releasing machines lock for "ha-082404-m02", held for 7.216502423s
I1216 19:55:06.883051 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404-m02
I1216 19:55:06.910860 103685 out.go:177] * Found network options:
I1216 19:55:06.913461 103685 out.go:177] - NO_PROXY=192.168.49.2
W1216 19:55:06.916056 103685 proxy.go:119] fail to check proxy env: Error ip not in block
W1216 19:55:06.916096 103685 proxy.go:119] fail to check proxy env: Error ip not in block
I1216 19:55:06.916173 103685 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1216 19:55:06.916226 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:06.916474 103685 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1216 19:55:06.916529 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m02
I1216 19:55:06.944136 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32833 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m02/id_rsa Username:docker}
I1216 19:55:06.962005 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32833 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m02/id_rsa Username:docker}
I1216 19:55:07.107812 103685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I1216 19:55:07.419056 103685 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I1216 19:55:07.419148 103685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1216 19:55:07.462281 103685 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
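After the patch above, the loopback CNI config is expected to carry an explicit name and a pinned 1.0.0 cniVersion (the exact filename varies by base image, hence the glob):
  sudo sh -c 'cat /etc/cni/net.d/*loopback.conf*'
  # roughly: { "cniVersion": "1.0.0", "name": "loopback", "type": "loopback" }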
I1216 19:55:07.462313 103685 start.go:495] detecting cgroup driver to use...
I1216 19:55:07.462349 103685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1216 19:55:07.462449 103685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1216 19:55:07.508709 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I1216 19:55:07.554955 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1216 19:55:07.586888 103685 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1216 19:55:07.586998 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1216 19:55:07.624519 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1216 19:55:07.663465 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1216 19:55:07.702298 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1216 19:55:07.798132 103685 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1216 19:55:07.864468 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1216 19:55:07.884618 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1216 19:55:07.897281 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1216 19:55:07.912463 103685 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1216 19:55:07.922985 103685 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1216 19:55:07.932600 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:55:08.273297 103685 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1216 19:55:18.949564 103685 ssh_runner.go:235] Completed: sudo systemctl restart containerd: (10.676216235s)
I1216 19:55:18.949594 103685 start.go:495] detecting cgroup driver to use...
I1216 19:55:18.949631 103685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1216 19:55:18.949684 103685 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1216 19:55:18.986837 103685 cruntime.go:279] skipping containerd shutdown because we are bound to it
I1216 19:55:18.986923 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1216 19:55:19.029599 103685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1216 19:55:19.071750 103685 ssh_runner.go:195] Run: which cri-dockerd
I1216 19:55:19.087660 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1216 19:55:19.114528 103685 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I1216 19:55:19.162019 103685 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1216 19:55:19.331558 103685 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1216 19:55:19.483311 103685 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I1216 19:55:19.483351 103685 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1216 19:55:19.536577 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:55:19.726366 103685 ssh_runner.go:195] Run: sudo systemctl restart docker
I1216 19:55:20.529557 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1216 19:55:20.542543 103685 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1216 19:55:20.565866 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1216 19:55:20.580744 103685 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1216 19:55:20.728570 103685 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1216 19:55:20.878448 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:55:21.028622 103685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1216 19:55:21.053976 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1216 19:55:21.068190 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:55:21.226576 103685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1216 19:55:21.367914 103685 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1216 19:55:21.367992 103685 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1216 19:55:21.373459 103685 start.go:563] Will wait 60s for crictl version
I1216 19:55:21.373602 103685 ssh_runner.go:195] Run: which crictl
I1216 19:55:21.386387 103685 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1216 19:55:21.460177 103685 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.4.0
RuntimeApiVersion: v1
I1216 19:55:21.460289 103685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1216 19:55:21.516482 103685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1216 19:55:21.573328 103685 out.go:235] * Preparing Kubernetes v1.32.0 on Docker 27.4.0 ...
I1216 19:55:21.576172 103685 out.go:177] - env NO_PROXY=192.168.49.2
I1216 19:55:21.579121 103685 cli_runner.go:164] Run: docker network inspect ha-082404 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1216 19:55:21.597795 103685 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1216 19:55:21.607190 103685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1216 19:55:21.626026 103685 mustload.go:65] Loading cluster: ha-082404
I1216 19:55:21.626269 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:55:21.626552 103685 cli_runner.go:164] Run: docker container inspect ha-082404 --format={{.State.Status}}
I1216 19:55:21.650189 103685 host.go:66] Checking if "ha-082404" exists ...
I1216 19:55:21.650468 103685 certs.go:68] Setting up /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404 for IP: 192.168.49.3
I1216 19:55:21.650476 103685 certs.go:194] generating shared ca certs ...
I1216 19:55:21.650489 103685 certs.go:226] acquiring lock for ca certs: {Name:mk61ac4ce13eccd2c732f8ba869cb043f9f7a744 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:55:21.650597 103685 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key
I1216 19:55:21.650636 103685 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key
I1216 19:55:21.650644 103685 certs.go:256] generating profile certs ...
I1216 19:55:21.650716 103685 certs.go:359] skipping valid signed profile cert regeneration for "minikube-user": /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.key
I1216 19:55:21.650776 103685 certs.go:359] skipping valid signed profile cert regeneration for "minikube": /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key.a8075758
I1216 19:55:21.650813 103685 certs.go:359] skipping valid signed profile cert regeneration for "aggregator": /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.key
I1216 19:55:21.650820 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I1216 19:55:21.650832 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I1216 19:55:21.650842 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I1216 19:55:21.650854 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I1216 19:55:21.650876 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt -> /var/lib/minikube/certs/apiserver.crt
I1216 19:55:21.650890 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key -> /var/lib/minikube/certs/apiserver.key
I1216 19:55:21.650911 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.crt -> /var/lib/minikube/certs/proxy-client.crt
I1216 19:55:21.650922 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.key -> /var/lib/minikube/certs/proxy-client.key
I1216 19:55:21.650988 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem (1338 bytes)
W1216 19:55:21.651021 103685 certs.go:480] ignoring /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569_empty.pem, impossibly tiny 0 bytes
I1216 19:55:21.651028 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem (1679 bytes)
I1216 19:55:21.651051 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem (1082 bytes)
I1216 19:55:21.651083 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem (1123 bytes)
I1216 19:55:21.651103 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem (1675 bytes)
I1216 19:55:21.651146 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem (1708 bytes)
I1216 19:55:21.651174 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem -> /usr/share/ca-certificates/7569.pem
I1216 19:55:21.651188 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> /usr/share/ca-certificates/75692.pem
I1216 19:55:21.651198 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I1216 19:55:21.651255 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404
I1216 19:55:21.679379 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32828 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404/id_rsa Username:docker}
I1216 19:55:21.778150 103685 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.pub
I1216 19:55:21.782417 103685 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.pub --> memory (451 bytes)
I1216 19:55:21.800985 103685 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/sa.key
I1216 19:55:21.805624 103685 ssh_runner.go:447] scp /var/lib/minikube/certs/sa.key --> memory (1679 bytes)
I1216 19:55:21.821748 103685 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.crt
I1216 19:55:21.826269 103685 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.crt --> memory (1123 bytes)
I1216 19:55:21.844554 103685 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/front-proxy-ca.key
I1216 19:55:21.849139 103685 ssh_runner.go:447] scp /var/lib/minikube/certs/front-proxy-ca.key --> memory (1675 bytes)
I1216 19:55:21.866105 103685 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.crt
I1216 19:55:21.870094 103685 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.crt --> memory (1094 bytes)
I1216 19:55:21.883896 103685 ssh_runner.go:195] Run: stat -c %s /var/lib/minikube/certs/etcd/ca.key
I1216 19:55:21.887737 103685 ssh_runner.go:447] scp /var/lib/minikube/certs/etcd/ca.key --> memory (1675 bytes)
I1216 19:55:21.902032 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1216 19:55:21.937291 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1216 19:55:22.015271 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1216 19:55:22.109960 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1216 19:55:22.207751 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes)
I1216 19:55:22.316817 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I1216 19:55:22.419036 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1216 19:55:22.626344 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1216 19:55:22.749534 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem --> /usr/share/ca-certificates/7569.pem (1338 bytes)
I1216 19:55:22.940495 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem --> /usr/share/ca-certificates/75692.pem (1708 bytes)
I1216 19:55:23.045839 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1216 19:55:23.093922 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.pub (451 bytes)
I1216 19:55:23.181797 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/sa.key (1679 bytes)
I1216 19:55:23.206553 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.crt (1123 bytes)
I1216 19:55:23.259794 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/front-proxy-ca.key (1675 bytes)
I1216 19:55:23.481206 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.crt (1094 bytes)
I1216 19:55:23.569983 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/certs/etcd/ca.key (1675 bytes)
I1216 19:55:23.631141 103685 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (744 bytes)
I1216 19:55:23.654029 103685 ssh_runner.go:195] Run: openssl version
I1216 19:55:23.668189 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7569.pem && ln -fs /usr/share/ca-certificates/7569.pem /etc/ssl/certs/7569.pem"
I1216 19:55:23.690238 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7569.pem
I1216 19:55:23.694563 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 16 19:42 /usr/share/ca-certificates/7569.pem
I1216 19:55:23.694678 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7569.pem
I1216 19:55:23.706744 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/7569.pem /etc/ssl/certs/51391683.0"
I1216 19:55:23.723826 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/75692.pem && ln -fs /usr/share/ca-certificates/75692.pem /etc/ssl/certs/75692.pem"
I1216 19:55:23.750473 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/75692.pem
I1216 19:55:23.763431 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 16 19:42 /usr/share/ca-certificates/75692.pem
I1216 19:55:23.763552 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/75692.pem
I1216 19:55:23.781626 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/75692.pem /etc/ssl/certs/3ec20f2e.0"
I1216 19:55:23.804241 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1216 19:55:23.834870 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1216 19:55:23.839678 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 16 19:35 /usr/share/ca-certificates/minikubeCA.pem
I1216 19:55:23.839783 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1216 19:55:23.847924 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1216 19:55:23.858111 103685 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1216 19:55:23.886190 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-etcd-client.crt -checkend 86400
I1216 19:55:23.915107 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/apiserver-kubelet-client.crt -checkend 86400
I1216 19:55:23.944387 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/server.crt -checkend 86400
I1216 19:55:23.967631 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/healthcheck-client.crt -checkend 86400
I1216 19:55:23.987856 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/etcd/peer.crt -checkend 86400
I1216 19:55:24.006193 103685 ssh_runner.go:195] Run: openssl x509 -noout -in /var/lib/minikube/certs/front-proxy-client.crt -checkend 86400
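
The six openssl runs above are minikube's pre-flight certificate expiry check: `-checkend 86400` makes openssl exit non-zero if the certificate expires within the next 86400 seconds (24 hours). A minimal Go sketch of the same expiry test, with an illustrative certificate path (not taken from this log):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Illustrative path; the log checks several certs under /var/lib/minikube/certs.
	data, err := os.ReadFile("/var/lib/minikube/certs/apiserver-kubelet-client.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Same condition as `openssl x509 -noout -checkend 86400`:
	// does the certificate expire within the next 24 hours?
	if time.Now().Add(24 * time.Hour).After(cert.NotAfter) {
		fmt.Println("certificate expires within 24h")
		os.Exit(1)
	}
	fmt.Println("certificate valid for at least 24h")
}
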
I1216 19:55:24.029296 103685 kubeadm.go:934] updating node {m02 192.168.49.3 8443 v1.32.0 docker true true} ...
I1216 19:55:24.029467 103685 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-082404-m02 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.3
[Install]
config:
{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1216 19:55:24.029501 103685 kube-vip.go:115] generating kube-vip config ...
I1216 19:55:24.029565 103685 ssh_runner.go:195] Run: sudo sh -c "lsmod | grep ip_vs"
I1216 19:55:24.067949 103685 kube-vip.go:163] giving up enabling control-plane load-balancing as ipvs kernel modules appears not to be available: sudo sh -c "lsmod | grep ip_vs": Process exited with status 1
stdout:
stderr:
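
kube-vip's control-plane load-balancing needs the ip_vs kernel modules, and the failed `lsmod | grep ip_vs` probe above (exit status 1) is why minikube falls back to the plain leader-election VIP config that follows. `lsmod` reads /proc/modules, so an equivalent probe can be sketched in Go as below (note that modules compiled into the kernel do not appear in /proc/modules):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	// /proc/modules lists loaded loadable kernel modules, one per line,
	// with the module name in the first column (this is what lsmod parses).
	f, err := os.Open("/proc/modules")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	found := false
	s := bufio.NewScanner(f)
	for s.Scan() {
		if strings.HasPrefix(s.Text(), "ip_vs") {
			found = true
			break
		}
	}
	fmt.Println("ip_vs loaded:", found)
}
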
I1216 19:55:24.068035 103685 kube-vip.go:137] kube-vip config:
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  name: kube-vip
  namespace: kube-system
spec:
  containers:
  - args:
    - manager
    env:
    - name: vip_arp
      value: "true"
    - name: port
      value: "8443"
    - name: vip_nodename
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    - name: vip_interface
      value: eth0
    - name: vip_cidr
      value: "32"
    - name: dns_mode
      value: first
    - name: cp_enable
      value: "true"
    - name: cp_namespace
      value: kube-system
    - name: vip_leaderelection
      value: "true"
    - name: vip_leasename
      value: plndr-cp-lock
    - name: vip_leaseduration
      value: "5"
    - name: vip_renewdeadline
      value: "3"
    - name: vip_retryperiod
      value: "1"
    - name: address
      value: 192.168.49.254
    - name: prometheus_server
      value: :2112
    image: ghcr.io/kube-vip/kube-vip:v0.8.7
    imagePullPolicy: IfNotPresent
    name: kube-vip
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
    volumeMounts:
    - mountPath: /etc/kubernetes/admin.conf
      name: kubeconfig
  hostAliases:
  - hostnames:
    - kubernetes
    ip: 127.0.0.1
  hostNetwork: true
  volumes:
  - hostPath:
      path: "/etc/kubernetes/admin.conf"
    name: kubeconfig
status: {}
I1216 19:55:24.068113 103685 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.0
I1216 19:55:24.100521 103685 binaries.go:44] Found k8s binaries, skipping transfer
I1216 19:55:24.100620 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /etc/kubernetes/manifests
I1216 19:55:24.134223 103685 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I1216 19:55:24.270591 103685 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1216 19:55:24.400174 103685 ssh_runner.go:362] scp memory --> /etc/kubernetes/manifests/kube-vip.yaml (1358 bytes)
I1216 19:55:24.495127 103685 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I1216 19:55:24.516790 103685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1216 19:55:24.588634 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:55:24.836722 103685 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1216 19:55:24.851927 103685 start.go:235] Will wait 6m0s for node &{Name:m02 IP:192.168.49.3 Port:8443 KubernetesVersion:v1.32.0 ContainerRuntime:docker ControlPlane:true Worker:true}
I1216 19:55:24.852248 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:55:24.856762 103685 out.go:177] * Verifying Kubernetes components...
I1216 19:55:24.859293 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:55:25.070341 103685 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1216 19:55:25.105049 103685 loader.go:395] Config loaded from file: /home/jenkins/minikube-integration/20091-2258/kubeconfig
I1216 19:55:25.105396 103685 kapi.go:59] client config for ha-082404: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.crt", KeyFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.key", CAFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)},
UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x1eafe20), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W1216 19:55:25.105477 103685 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
I1216 19:55:25.105762 103685 node_ready.go:35] waiting up to 6m0s for node "ha-082404-m02" to be "Ready" ...
I1216 19:55:25.105879 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:25.105894 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.105904 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.105911 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.128275 103685 round_trippers.go:574] Response Status: 200 OK in 22 milliseconds
I1216 19:55:25.131445 103685 node_ready.go:49] node "ha-082404-m02" has status "Ready":"True"
I1216 19:55:25.131474 103685 node_ready.go:38] duration metric: took 25.691092ms for node "ha-082404-m02" to be "Ready" ...
I1216 19:55:25.131485 103685 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1216 19:55:25.131533 103685 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false
I1216 19:55:25.131551 103685 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false
I1216 19:55:25.131611 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
I1216 19:55:25.131622 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.131631 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.131635 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.144276 103685 round_trippers.go:574] Response Status: 200 OK in 12 milliseconds
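
The GET request/response pairs that follow are minikube's pod_ready helper polling each system-critical pod, and the node hosting it, for the Ready condition. A minimal client-go sketch of that polling pattern, assuming an illustrative kubeconfig path and pod name (not minikube's own helper code):

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// isPodReady reports whether the pod has condition Ready=True.
func isPodReady(pod *corev1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	// Illustrative kubeconfig path.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Poll until the pod reports Ready or the deadline passes, mirroring
	// the repeated GETs against /api/v1/namespaces/kube-system/pods/... in the log.
	deadline := time.Now().Add(6 * time.Minute)
	for time.Now().Before(deadline) {
		pod, err := cs.CoreV1().Pods("kube-system").Get(context.Background(),
			"kube-controller-manager-ha-082404", metav1.GetOptions{})
		if err == nil && isPodReady(pod) {
			fmt.Println("pod is Ready")
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("timed out waiting for pod to become Ready")
}
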
I1216 19:55:25.160272 103685 pod_ready.go:79] waiting up to 6m0s for pod "coredns-668d6bf9bc-9th4p" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.160379 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/coredns-668d6bf9bc-9th4p
I1216 19:55:25.160392 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.160402 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.160413 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.165794 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:25.166949 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:25.166972 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.166983 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.166987 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.171902 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:25.172862 103685 pod_ready.go:93] pod "coredns-668d6bf9bc-9th4p" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:25.172888 103685 pod_ready.go:82] duration metric: took 12.575089ms for pod "coredns-668d6bf9bc-9th4p" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.172900 103685 pod_ready.go:79] waiting up to 6m0s for pod "coredns-668d6bf9bc-mwl2r" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.172989 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/coredns-668d6bf9bc-mwl2r
I1216 19:55:25.172999 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.173009 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.173012 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.178282 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:25.179299 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:25.179322 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.179333 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.179338 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.184016 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:25.184807 103685 pod_ready.go:93] pod "coredns-668d6bf9bc-mwl2r" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:25.184831 103685 pod_ready.go:82] duration metric: took 11.915736ms for pod "coredns-668d6bf9bc-mwl2r" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.184844 103685 pod_ready.go:79] waiting up to 6m0s for pod "etcd-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.184921 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/etcd-ha-082404
I1216 19:55:25.184931 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.184940 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.184945 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.189559 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:25.190760 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:25.190781 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.190790 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.190795 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.193210 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:25.194052 103685 pod_ready.go:93] pod "etcd-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:25.194075 103685 pod_ready.go:82] duration metric: took 9.217454ms for pod "etcd-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.194088 103685 pod_ready.go:79] waiting up to 6m0s for pod "etcd-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.194166 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/etcd-ha-082404-m02
I1216 19:55:25.194176 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.194184 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.194192 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.198494 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:25.199544 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:25.199564 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.199574 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.199578 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.202223 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:25.203109 103685 pod_ready.go:93] pod "etcd-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:25.203129 103685 pod_ready.go:82] duration metric: took 9.029644ms for pod "etcd-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.203141 103685 pod_ready.go:79] waiting up to 6m0s for pod "etcd-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.306745 103685 request.go:632] Waited for 103.527586ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/etcd-ha-082404-m03
I1216 19:55:25.306807 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/etcd-ha-082404-m03
I1216 19:55:25.306816 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.306827 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.306834 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.310211 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
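
The "Waited for ... due to client-side throttling" lines come from client-go's local rate limiter: with QPS and Burst left at zero in the rest.Config dumped earlier, client-go applies its defaults (5 QPS, burst of 10), so bursts of GETs are queued on the client before being sent. A minimal sketch of raising those limits on a rest.Config, assuming an illustrative kubeconfig path:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; the test uses its own profile kubeconfig.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/.kube/config")
	if err != nil {
		panic(err)
	}
	// QPS=0/Burst=0 (as in the rest.Config shown above) means client-go falls
	// back to its defaults of 5 QPS with a burst of 10, which produces the
	// client-side throttling waits. Raising them avoids the local queueing.
	cfg.QPS = 50
	cfg.Burst = 100
	fmt.Printf("host=%s qps=%v burst=%v\n", cfg.Host, cfg.QPS, cfg.Burst)
}
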
I1216 19:55:25.506572 103685 request.go:632] Waited for 195.24444ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:55:25.506639 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:55:25.506647 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.506656 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.506667 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.509682 103685 round_trippers.go:574] Response Status: 404 Not Found in 2 milliseconds
I1216 19:55:25.510157 103685 pod_ready.go:98] node "ha-082404-m03" hosting pod "etcd-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:55:25.510177 103685 pod_ready.go:82] duration metric: took 307.021033ms for pod "etcd-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
E1216 19:55:25.510192 103685 pod_ready.go:67] WaitExtra: waitPodCondition: node "ha-082404-m03" hosting pod "etcd-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:55:25.510221 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.706522 103685 request.go:632] Waited for 196.219352ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404
I1216 19:55:25.706580 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404
I1216 19:55:25.706590 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.706599 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.706603 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.709499 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:25.906544 103685 request.go:632] Waited for 196.20958ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:25.906670 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:25.906716 103685 round_trippers.go:469] Request Headers:
I1216 19:55:25.906749 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:25.906772 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:25.911126 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:25.912532 103685 pod_ready.go:93] pod "kube-apiserver-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:25.912604 103685 pod_ready.go:82] duration metric: took 402.367448ms for pod "kube-apiserver-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:55:25.912675 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:55:26.106813 103685 request.go:632] Waited for 194.003183ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404-m02
I1216 19:55:26.106930 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404-m02
I1216 19:55:26.106944 103685 round_trippers.go:469] Request Headers:
I1216 19:55:26.106953 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:26.106958 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:26.110226 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:26.306552 103685 request.go:632] Waited for 195.345332ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:26.306658 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:26.306672 103685 round_trippers.go:469] Request Headers:
I1216 19:55:26.306682 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:26.306693 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:26.309443 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:26.310047 103685 pod_ready.go:93] pod "kube-apiserver-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:26.310071 103685 pod_ready.go:82] duration metric: took 397.372938ms for pod "kube-apiserver-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:55:26.310084 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
I1216 19:55:26.505949 103685 request.go:632] Waited for 195.780248ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404-m03
I1216 19:55:26.506037 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404-m03
I1216 19:55:26.506049 103685 round_trippers.go:469] Request Headers:
I1216 19:55:26.506058 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:26.506062 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:26.509007 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:26.705970 103685 request.go:632] Waited for 196.229087ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:55:26.706034 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:55:26.706041 103685 round_trippers.go:469] Request Headers:
I1216 19:55:26.706050 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:26.706056 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:26.711100 103685 round_trippers.go:574] Response Status: 404 Not Found in 5 milliseconds
I1216 19:55:26.711922 103685 pod_ready.go:98] node "ha-082404-m03" hosting pod "kube-apiserver-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:55:26.711948 103685 pod_ready.go:82] duration metric: took 401.856071ms for pod "kube-apiserver-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
E1216 19:55:26.711960 103685 pod_ready.go:67] WaitExtra: waitPodCondition: node "ha-082404-m03" hosting pod "kube-apiserver-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:55:26.711969 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:55:26.906226 103685 request.go:632] Waited for 194.136589ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:26.906338 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:26.906376 103685 round_trippers.go:469] Request Headers:
I1216 19:55:26.906404 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:26.906422 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:26.909406 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:27.106393 103685 request.go:632] Waited for 195.318856ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:27.106478 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:27.106492 103685 round_trippers.go:469] Request Headers:
I1216 19:55:27.106542 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:27.106546 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:27.109508 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:27.306320 103685 request.go:632] Waited for 93.141374ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:27.306391 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:27.306404 103685 round_trippers.go:469] Request Headers:
I1216 19:55:27.306414 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:27.306419 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:27.309527 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:27.506675 103685 request.go:632] Waited for 196.268082ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:27.506751 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:27.506763 103685 round_trippers.go:469] Request Headers:
I1216 19:55:27.506773 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:27.506791 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:27.509888 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:27.712525 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:27.712551 103685 round_trippers.go:469] Request Headers:
I1216 19:55:27.712562 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:27.712566 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:27.716140 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:27.906379 103685 request.go:632] Waited for 189.198247ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:27.906452 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:27.906464 103685 round_trippers.go:469] Request Headers:
I1216 19:55:27.906472 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:27.906479 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:27.909221 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:28.212227 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:28.212252 103685 round_trippers.go:469] Request Headers:
I1216 19:55:28.212261 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:28.212264 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:28.215409 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:28.306156 103685 request.go:632] Waited for 89.700659ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:28.306218 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:28.306227 103685 round_trippers.go:469] Request Headers:
I1216 19:55:28.306236 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:28.306241 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:28.308923 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:28.713168 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:28.713193 103685 round_trippers.go:469] Request Headers:
I1216 19:55:28.713203 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:28.713207 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:28.716013 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:28.716909 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:28.716929 103685 round_trippers.go:469] Request Headers:
I1216 19:55:28.716939 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:28.716945 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:28.719477 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:28.720001 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:29.212793 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:29.212821 103685 round_trippers.go:469] Request Headers:
I1216 19:55:29.212836 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:29.212843 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:29.218265 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:29.219106 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:29.219146 103685 round_trippers.go:469] Request Headers:
I1216 19:55:29.219185 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:29.219211 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:29.225492 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:55:29.712225 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:29.712249 103685 round_trippers.go:469] Request Headers:
I1216 19:55:29.712259 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:29.712264 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:29.715290 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:29.716443 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:29.716502 103685 round_trippers.go:469] Request Headers:
I1216 19:55:29.716528 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:29.716545 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:29.719469 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:30.212209 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:30.212263 103685 round_trippers.go:469] Request Headers:
I1216 19:55:30.212280 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:30.212290 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:30.217367 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:30.219240 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:30.219271 103685 round_trippers.go:469] Request Headers:
I1216 19:55:30.219281 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:30.219303 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:30.223429 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:30.712944 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:30.712968 103685 round_trippers.go:469] Request Headers:
I1216 19:55:30.712992 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:30.712997 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:30.716634 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:30.717934 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:30.717954 103685 round_trippers.go:469] Request Headers:
I1216 19:55:30.717964 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:30.717983 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:30.720779 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:30.721709 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:31.212907 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:31.212942 103685 round_trippers.go:469] Request Headers:
I1216 19:55:31.212952 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:31.212956 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:31.216672 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:31.217842 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:31.217864 103685 round_trippers.go:469] Request Headers:
I1216 19:55:31.217874 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:31.217879 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:31.221159 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:31.712877 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:31.712905 103685 round_trippers.go:469] Request Headers:
I1216 19:55:31.712915 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:31.712921 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:31.716250 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:31.717123 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:31.717144 103685 round_trippers.go:469] Request Headers:
I1216 19:55:31.717153 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:31.717158 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:31.720058 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:32.212506 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:32.212587 103685 round_trippers.go:469] Request Headers:
I1216 19:55:32.212641 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:32.212673 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:32.216330 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:32.217428 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:32.217487 103685 round_trippers.go:469] Request Headers:
I1216 19:55:32.217510 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:32.217529 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:32.220714 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:32.712252 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:32.712338 103685 round_trippers.go:469] Request Headers:
I1216 19:55:32.712362 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:32.712381 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:32.716163 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:32.717562 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:32.717622 103685 round_trippers.go:469] Request Headers:
I1216 19:55:32.717644 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:32.717664 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:32.721327 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:32.722472 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:33.212850 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:33.212883 103685 round_trippers.go:469] Request Headers:
I1216 19:55:33.212893 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:33.212897 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:33.215746 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:33.216939 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:33.216954 103685 round_trippers.go:469] Request Headers:
I1216 19:55:33.216963 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:33.216968 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:33.238659 103685 round_trippers.go:574] Response Status: 200 OK in 21 milliseconds
I1216 19:55:33.712187 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:33.712207 103685 round_trippers.go:469] Request Headers:
I1216 19:55:33.712216 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:33.712221 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:33.717777 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:33.718485 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:33.718497 103685 round_trippers.go:469] Request Headers:
I1216 19:55:33.718505 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:33.718510 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:33.722346 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:34.213167 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:34.213192 103685 round_trippers.go:469] Request Headers:
I1216 19:55:34.213202 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:34.213208 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:34.216390 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:34.217092 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:34.217114 103685 round_trippers.go:469] Request Headers:
I1216 19:55:34.217123 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:34.217129 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:34.219881 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:34.712704 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:34.712730 103685 round_trippers.go:469] Request Headers:
I1216 19:55:34.712740 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:34.712745 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:34.715715 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:34.716955 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:34.716971 103685 round_trippers.go:469] Request Headers:
I1216 19:55:34.716981 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:34.716987 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:34.719685 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:35.212232 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:35.212255 103685 round_trippers.go:469] Request Headers:
I1216 19:55:35.212265 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:35.212269 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:35.215656 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:35.216580 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:35.216601 103685 round_trippers.go:469] Request Headers:
I1216 19:55:35.216612 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:35.216616 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:35.219428 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:35.220026 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:35.712991 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:35.713017 103685 round_trippers.go:469] Request Headers:
I1216 19:55:35.713028 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:35.713033 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:35.716060 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:35.717056 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:35.717079 103685 round_trippers.go:469] Request Headers:
I1216 19:55:35.717089 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:35.717094 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:35.719764 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:36.213055 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:36.213083 103685 round_trippers.go:469] Request Headers:
I1216 19:55:36.213093 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:36.213097 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:36.216327 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:36.217607 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:36.217695 103685 round_trippers.go:469] Request Headers:
I1216 19:55:36.217709 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:36.217715 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:36.220812 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:36.712218 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:36.712243 103685 round_trippers.go:469] Request Headers:
I1216 19:55:36.712253 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:36.712258 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:36.715134 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:36.715877 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:36.715893 103685 round_trippers.go:469] Request Headers:
I1216 19:55:36.715902 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:36.715907 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:36.718328 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:37.212544 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:37.212568 103685 round_trippers.go:469] Request Headers:
I1216 19:55:37.212578 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:37.212581 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:37.215728 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:37.216590 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:37.216609 103685 round_trippers.go:469] Request Headers:
I1216 19:55:37.216619 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:37.216625 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:37.219296 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:37.712211 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:37.712238 103685 round_trippers.go:469] Request Headers:
I1216 19:55:37.712249 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:37.712253 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:37.715236 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:37.715907 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:37.715936 103685 round_trippers.go:469] Request Headers:
I1216 19:55:37.715945 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:37.715948 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:37.718574 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:37.719157 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:38.212230 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:38.212263 103685 round_trippers.go:469] Request Headers:
I1216 19:55:38.212282 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:38.212286 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:38.215354 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:38.216122 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:38.216158 103685 round_trippers.go:469] Request Headers:
I1216 19:55:38.216169 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:38.216187 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:38.219086 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:38.712255 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:38.712280 103685 round_trippers.go:469] Request Headers:
I1216 19:55:38.712289 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:38.712294 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:38.716944 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:38.717993 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:38.718013 103685 round_trippers.go:469] Request Headers:
I1216 19:55:38.718024 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:38.718028 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:38.720817 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:39.213204 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:39.213230 103685 round_trippers.go:469] Request Headers:
I1216 19:55:39.213240 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:39.213246 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:39.216241 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:39.217366 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:39.217392 103685 round_trippers.go:469] Request Headers:
I1216 19:55:39.217402 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:39.217406 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:39.220158 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:39.712409 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:39.712437 103685 round_trippers.go:469] Request Headers:
I1216 19:55:39.712447 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:39.712453 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:39.715348 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:39.716426 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:39.716443 103685 round_trippers.go:469] Request Headers:
I1216 19:55:39.716452 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:39.716457 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:39.719405 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:39.719914 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:40.212702 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:40.212721 103685 round_trippers.go:469] Request Headers:
I1216 19:55:40.212731 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:40.212734 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:40.216536 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:40.217297 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:40.217323 103685 round_trippers.go:469] Request Headers:
I1216 19:55:40.217332 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:40.217337 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:40.221184 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:40.712187 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:40.712214 103685 round_trippers.go:469] Request Headers:
I1216 19:55:40.712224 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:40.712230 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:40.715074 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:40.716186 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:40.716246 103685 round_trippers.go:469] Request Headers:
I1216 19:55:40.716269 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:40.716289 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:40.718966 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:41.212113 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:41.212133 103685 round_trippers.go:469] Request Headers:
I1216 19:55:41.212148 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:41.212152 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:41.221923 103685 round_trippers.go:574] Response Status: 200 OK in 9 milliseconds
I1216 19:55:41.223077 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:41.223094 103685 round_trippers.go:469] Request Headers:
I1216 19:55:41.223104 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:41.223110 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:41.234284 103685 round_trippers.go:574] Response Status: 200 OK in 11 milliseconds
I1216 19:55:41.713013 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:41.713031 103685 round_trippers.go:469] Request Headers:
I1216 19:55:41.713040 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:41.713044 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:41.727694 103685 round_trippers.go:574] Response Status: 200 OK in 14 milliseconds
I1216 19:55:41.728884 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:41.728901 103685 round_trippers.go:469] Request Headers:
I1216 19:55:41.728910 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:41.728915 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:41.758360 103685 round_trippers.go:574] Response Status: 200 OK in 29 milliseconds
I1216 19:55:41.759387 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:42.213086 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:42.213110 103685 round_trippers.go:469] Request Headers:
I1216 19:55:42.213120 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:42.213124 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:42.229786 103685 round_trippers.go:574] Response Status: 200 OK in 16 milliseconds
I1216 19:55:42.230667 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:42.230684 103685 round_trippers.go:469] Request Headers:
I1216 19:55:42.230692 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:42.230696 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:42.237667 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:55:42.712949 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:42.712967 103685 round_trippers.go:469] Request Headers:
I1216 19:55:42.712976 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:42.712981 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:42.746824 103685 round_trippers.go:574] Response Status: 200 OK in 33 milliseconds
I1216 19:55:42.747567 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:42.747583 103685 round_trippers.go:469] Request Headers:
I1216 19:55:42.747592 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:42.747595 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:42.762485 103685 round_trippers.go:574] Response Status: 200 OK in 14 milliseconds
I1216 19:55:43.212259 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:43.212281 103685 round_trippers.go:469] Request Headers:
I1216 19:55:43.212291 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:43.212297 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:43.225759 103685 round_trippers.go:574] Response Status: 200 OK in 13 milliseconds
I1216 19:55:43.226968 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:43.226986 103685 round_trippers.go:469] Request Headers:
I1216 19:55:43.226996 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:43.227004 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:43.239485 103685 round_trippers.go:574] Response Status: 200 OK in 12 milliseconds
I1216 19:55:43.713028 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:43.713046 103685 round_trippers.go:469] Request Headers:
I1216 19:55:43.713056 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:43.713060 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:43.724510 103685 round_trippers.go:574] Response Status: 200 OK in 11 milliseconds
I1216 19:55:43.725695 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:43.725712 103685 round_trippers.go:469] Request Headers:
I1216 19:55:43.725721 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:43.725727 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:43.740178 103685 round_trippers.go:574] Response Status: 200 OK in 14 milliseconds
I1216 19:55:44.212177 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:44.212196 103685 round_trippers.go:469] Request Headers:
I1216 19:55:44.212205 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:44.212208 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:44.225183 103685 round_trippers.go:574] Response Status: 200 OK in 12 milliseconds
I1216 19:55:44.226335 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:44.226351 103685 round_trippers.go:469] Request Headers:
I1216 19:55:44.226360 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:44.226364 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:44.230702 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:44.231588 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:44.712206 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:44.712238 103685 round_trippers.go:469] Request Headers:
I1216 19:55:44.712249 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:44.712256 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:44.715275 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:44.716614 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:44.716631 103685 round_trippers.go:469] Request Headers:
I1216 19:55:44.716639 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:44.716643 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:44.719876 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:45.212796 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:45.212822 103685 round_trippers.go:469] Request Headers:
I1216 19:55:45.212833 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:45.212838 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:45.225107 103685 round_trippers.go:574] Response Status: 200 OK in 12 milliseconds
I1216 19:55:45.226663 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:45.226685 103685 round_trippers.go:469] Request Headers:
I1216 19:55:45.226695 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:45.226700 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:45.257691 103685 round_trippers.go:574] Response Status: 200 OK in 30 milliseconds
I1216 19:55:45.712174 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:45.712198 103685 round_trippers.go:469] Request Headers:
I1216 19:55:45.712208 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:45.712212 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:45.715138 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:45.715833 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:45.715847 103685 round_trippers.go:469] Request Headers:
I1216 19:55:45.715857 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:45.715862 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:45.718262 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:46.212311 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:46.212336 103685 round_trippers.go:469] Request Headers:
I1216 19:55:46.212347 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:46.212351 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:46.215514 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:46.217439 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:46.217459 103685 round_trippers.go:469] Request Headers:
I1216 19:55:46.217468 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:46.217473 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:46.222554 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:46.712997 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:46.713021 103685 round_trippers.go:469] Request Headers:
I1216 19:55:46.713030 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:46.713036 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:46.716121 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:46.716949 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:46.716970 103685 round_trippers.go:469] Request Headers:
I1216 19:55:46.716980 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:46.716983 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:46.719456 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:46.720093 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:47.212205 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:47.212228 103685 round_trippers.go:469] Request Headers:
I1216 19:55:47.212237 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:47.212241 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:47.215216 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:47.216032 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:47.216092 103685 round_trippers.go:469] Request Headers:
I1216 19:55:47.216103 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:47.216107 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:47.224911 103685 round_trippers.go:574] Response Status: 200 OK in 8 milliseconds
I1216 19:55:47.713073 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:47.713097 103685 round_trippers.go:469] Request Headers:
I1216 19:55:47.713113 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:47.713117 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:47.716414 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:47.717113 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:47.717124 103685 round_trippers.go:469] Request Headers:
I1216 19:55:47.717133 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:47.717139 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:47.719985 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:48.212914 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:48.212940 103685 round_trippers.go:469] Request Headers:
I1216 19:55:48.212949 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:48.212955 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:48.215914 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:48.217172 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:48.217201 103685 round_trippers.go:469] Request Headers:
I1216 19:55:48.217211 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:48.217215 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:48.219965 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:48.712147 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:48.712173 103685 round_trippers.go:469] Request Headers:
I1216 19:55:48.712184 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:48.712189 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:48.715144 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:48.715948 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:48.715966 103685 round_trippers.go:469] Request Headers:
I1216 19:55:48.715975 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:48.715982 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:48.718654 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:49.212732 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:49.212756 103685 round_trippers.go:469] Request Headers:
I1216 19:55:49.212766 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:49.212770 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:49.215778 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:49.216545 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:49.216562 103685 round_trippers.go:469] Request Headers:
I1216 19:55:49.216572 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:49.216576 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:49.223372 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:55:49.223887 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:49.712430 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:49.712455 103685 round_trippers.go:469] Request Headers:
I1216 19:55:49.712465 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:49.712469 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:49.715408 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:49.716312 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:49.716335 103685 round_trippers.go:469] Request Headers:
I1216 19:55:49.716356 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:49.716361 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:49.719130 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:50.212583 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:50.212607 103685 round_trippers.go:469] Request Headers:
I1216 19:55:50.212616 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:50.212621 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:50.215582 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:50.216536 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:50.216556 103685 round_trippers.go:469] Request Headers:
I1216 19:55:50.216565 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:50.216571 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:50.219210 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:50.712785 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:50.712809 103685 round_trippers.go:469] Request Headers:
I1216 19:55:50.712818 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:50.712824 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:50.716075 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:50.716935 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:50.716953 103685 round_trippers.go:469] Request Headers:
I1216 19:55:50.716963 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:50.716969 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:50.719722 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:51.212916 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:51.212941 103685 round_trippers.go:469] Request Headers:
I1216 19:55:51.212951 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:51.212957 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:51.215989 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:51.216804 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:51.216846 103685 round_trippers.go:469] Request Headers:
I1216 19:55:51.216890 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:51.216915 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:51.226587 103685 round_trippers.go:574] Response Status: 200 OK in 9 milliseconds
I1216 19:55:51.227391 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:51.713057 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:51.713083 103685 round_trippers.go:469] Request Headers:
I1216 19:55:51.713094 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:51.713101 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:51.716214 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:51.716990 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:51.717009 103685 round_trippers.go:469] Request Headers:
I1216 19:55:51.717019 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:51.717026 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:51.719693 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:52.212797 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:52.212823 103685 round_trippers.go:469] Request Headers:
I1216 19:55:52.212833 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:52.212837 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:52.215770 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:52.216750 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:52.216768 103685 round_trippers.go:469] Request Headers:
I1216 19:55:52.216777 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:52.216781 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:52.224569 103685 round_trippers.go:574] Response Status: 200 OK in 7 milliseconds
I1216 19:55:52.712182 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:52.712204 103685 round_trippers.go:469] Request Headers:
I1216 19:55:52.712213 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:52.712219 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:52.718161 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:52.719039 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:52.719091 103685 round_trippers.go:469] Request Headers:
I1216 19:55:52.719113 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:52.719131 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:52.725547 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:55:53.212388 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:53.212413 103685 round_trippers.go:469] Request Headers:
I1216 19:55:53.212424 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:53.212430 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:53.215492 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:53.216772 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:53.216799 103685 round_trippers.go:469] Request Headers:
I1216 19:55:53.216809 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:53.216813 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:53.219421 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:53.712969 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:53.712994 103685 round_trippers.go:469] Request Headers:
I1216 19:55:53.713010 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:53.713034 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:53.716221 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:53.716917 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:53.716933 103685 round_trippers.go:469] Request Headers:
I1216 19:55:53.716942 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:53.716948 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:53.719682 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:53.720240 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:54.212578 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:54.212603 103685 round_trippers.go:469] Request Headers:
I1216 19:55:54.212614 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:54.212618 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:54.216205 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:54.217135 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:54.217158 103685 round_trippers.go:469] Request Headers:
I1216 19:55:54.217168 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:54.217172 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:54.220408 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:54.712193 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:54.712220 103685 round_trippers.go:469] Request Headers:
I1216 19:55:54.712229 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:54.712233 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:54.715658 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:54.716647 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:54.716666 103685 round_trippers.go:469] Request Headers:
I1216 19:55:54.716676 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:54.716680 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:54.719360 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:55.212681 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:55.212706 103685 round_trippers.go:469] Request Headers:
I1216 19:55:55.212716 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:55.212720 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:55.215879 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:55.216581 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:55.216591 103685 round_trippers.go:469] Request Headers:
I1216 19:55:55.216599 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:55.216603 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:55.219690 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:55.713088 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:55.713113 103685 round_trippers.go:469] Request Headers:
I1216 19:55:55.713123 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:55.713127 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:55.716258 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:55.716906 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:55.716917 103685 round_trippers.go:469] Request Headers:
I1216 19:55:55.716925 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:55.716929 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:55.719373 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:56.212538 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:55:56.212563 103685 round_trippers.go:469] Request Headers:
I1216 19:55:56.212572 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:56.212577 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:56.215363 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:56.216109 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:55:56.216129 103685 round_trippers.go:469] Request Headers:
I1216 19:55:56.216139 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:56.216145 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:56.223684 103685 round_trippers.go:574] Response Status: 200 OK in 7 milliseconds
I1216 19:55:56.224157 103685 pod_ready.go:93] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:55:56.224178 103685 pod_ready.go:82] duration metric: took 29.512196236s for pod "kube-controller-manager-ha-082404" in "kube-system" namespace to be "Ready" ...
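[editor note] The block above shows the wait loop behind pod_ready.go: roughly every 500ms it GETs the pod and its node and checks the pod's Ready condition until it reports True (here after ~29.5s). The snippet below is only a minimal, hedged sketch of an equivalent poll in Go with client-go, not minikube's actual pod_ready.go code; the namespace, pod name, interval, and 6m timeout are taken from the log, everything else (kubeconfig path, error handling) is an illustrative assumption.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// isPodReady reports whether the pod's Ready condition is True,
// i.e. the same check the log summarizes as has status "Ready":"True".
func isPodReady(pod *corev1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	// Assumption: credentials come from the default kubeconfig (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Poll every 500ms for up to 6 minutes, mirroring the cadence and the
	// "waiting up to 6m0s" budget seen in the log above.
	err = wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, 6*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			pod, getErr := cs.CoreV1().Pods("kube-system").Get(ctx, "kube-controller-manager-ha-082404", metav1.GetOptions{})
			if getErr != nil {
				// Treat transient API errors as "not ready yet" and keep polling.
				return false, nil
			}
			return isPodReady(pod), nil
		})
	fmt.Println("pod ready:", err == nil)
}

[/editor note]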
I1216 19:55:56.224197 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:55:56.224266 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:56.224277 103685 round_trippers.go:469] Request Headers:
I1216 19:55:56.224285 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:56.224290 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:56.226876 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:56.227523 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:56.227544 103685 round_trippers.go:469] Request Headers:
I1216 19:55:56.227553 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:56.227564 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:56.230165 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:56.725211 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:56.725243 103685 round_trippers.go:469] Request Headers:
I1216 19:55:56.725253 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:56.725257 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:56.728559 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:56.729538 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:56.729557 103685 round_trippers.go:469] Request Headers:
I1216 19:55:56.729569 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:56.729580 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:56.738833 103685 round_trippers.go:574] Response Status: 200 OK in 9 milliseconds
I1216 19:55:57.224837 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:57.224856 103685 round_trippers.go:469] Request Headers:
I1216 19:55:57.224865 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:57.224870 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:57.235546 103685 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I1216 19:55:57.236355 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:57.236369 103685 round_trippers.go:469] Request Headers:
I1216 19:55:57.236379 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:57.236383 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:57.241555 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:55:57.724421 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:57.724447 103685 round_trippers.go:469] Request Headers:
I1216 19:55:57.724457 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:57.724461 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:57.727386 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:57.728265 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:57.728311 103685 round_trippers.go:469] Request Headers:
I1216 19:55:57.728335 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:57.728355 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:57.731607 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:58.225234 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:58.225259 103685 round_trippers.go:469] Request Headers:
I1216 19:55:58.225268 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:58.225272 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:58.228727 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:55:58.229973 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:58.229996 103685 round_trippers.go:469] Request Headers:
I1216 19:55:58.230006 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:58.230010 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:58.232747 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:58.233354 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:55:58.724722 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:58.724745 103685 round_trippers.go:469] Request Headers:
I1216 19:55:58.724756 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:58.724761 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:58.727738 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:58.728569 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:58.728592 103685 round_trippers.go:469] Request Headers:
I1216 19:55:58.728602 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:58.728609 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:58.732661 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:55:59.225336 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:59.225360 103685 round_trippers.go:469] Request Headers:
I1216 19:55:59.225369 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:59.225374 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:59.228191 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:59.229224 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:59.229243 103685 round_trippers.go:469] Request Headers:
I1216 19:55:59.229253 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:59.229257 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:59.231870 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:59.725225 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:55:59.725251 103685 round_trippers.go:469] Request Headers:
I1216 19:55:59.725261 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:59.725269 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:59.728259 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:55:59.729049 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:55:59.729097 103685 round_trippers.go:469] Request Headers:
I1216 19:55:59.729125 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:55:59.729149 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:55:59.732447 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:00.225106 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:00.225142 103685 round_trippers.go:469] Request Headers:
I1216 19:56:00.225166 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:00.225174 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:00.230377 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:56:00.239810 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:00.239833 103685 round_trippers.go:469] Request Headers:
I1216 19:56:00.239841 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:00.239845 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:00.270052 103685 round_trippers.go:574] Response Status: 200 OK in 30 milliseconds
I1216 19:56:00.270596 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:00.725022 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:00.725048 103685 round_trippers.go:469] Request Headers:
I1216 19:56:00.725057 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:00.725061 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:00.727994 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:00.728914 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:00.728935 103685 round_trippers.go:469] Request Headers:
I1216 19:56:00.728945 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:00.728949 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:00.736767 103685 round_trippers.go:574] Response Status: 200 OK in 7 milliseconds
I1216 19:56:01.225339 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:01.225373 103685 round_trippers.go:469] Request Headers:
I1216 19:56:01.225385 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:01.225391 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:01.229490 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:01.230405 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:01.230430 103685 round_trippers.go:469] Request Headers:
I1216 19:56:01.230441 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:01.230446 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:01.233958 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:01.724544 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:01.724614 103685 round_trippers.go:469] Request Headers:
I1216 19:56:01.724666 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:01.724691 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:01.728975 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:01.730492 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:01.730513 103685 round_trippers.go:469] Request Headers:
I1216 19:56:01.730522 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:01.730526 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:01.733196 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:02.225187 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:02.225211 103685 round_trippers.go:469] Request Headers:
I1216 19:56:02.225223 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:02.225230 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:02.228173 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:02.229193 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:02.229210 103685 round_trippers.go:469] Request Headers:
I1216 19:56:02.229219 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:02.229222 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:02.231917 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:02.724994 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:02.725021 103685 round_trippers.go:469] Request Headers:
I1216 19:56:02.725031 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:02.725042 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:02.727987 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:02.728780 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:02.728798 103685 round_trippers.go:469] Request Headers:
I1216 19:56:02.728807 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:02.728812 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:02.738964 103685 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I1216 19:56:02.739636 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:03.224520 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:03.224542 103685 round_trippers.go:469] Request Headers:
I1216 19:56:03.224552 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:03.224558 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:03.227493 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:03.228182 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:03.228192 103685 round_trippers.go:469] Request Headers:
I1216 19:56:03.228201 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:03.228205 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:03.230879 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:03.725092 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:03.725116 103685 round_trippers.go:469] Request Headers:
I1216 19:56:03.725126 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:03.725130 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:03.728149 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:03.728948 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:03.728964 103685 round_trippers.go:469] Request Headers:
I1216 19:56:03.728974 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:03.728979 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:03.732812 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:04.224978 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:04.224998 103685 round_trippers.go:469] Request Headers:
I1216 19:56:04.225007 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:04.225013 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:04.227993 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:04.228904 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:04.228923 103685 round_trippers.go:469] Request Headers:
I1216 19:56:04.228932 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:04.228936 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:04.231471 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:04.724532 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:04.724556 103685 round_trippers.go:469] Request Headers:
I1216 19:56:04.724565 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:04.724571 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:04.727627 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:04.728403 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:04.728462 103685 round_trippers.go:469] Request Headers:
I1216 19:56:04.728478 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:04.728484 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:04.738590 103685 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I1216 19:56:05.224466 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:05.224486 103685 round_trippers.go:469] Request Headers:
I1216 19:56:05.224496 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:05.224503 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:05.227450 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:05.228229 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:05.228239 103685 round_trippers.go:469] Request Headers:
I1216 19:56:05.228248 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:05.228252 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:05.230766 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:05.231317 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:05.725240 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:05.725266 103685 round_trippers.go:469] Request Headers:
I1216 19:56:05.725281 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:05.725287 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:05.728403 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:05.729196 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:05.729214 103685 round_trippers.go:469] Request Headers:
I1216 19:56:05.729224 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:05.729228 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:05.739886 103685 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I1216 19:56:06.225116 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:06.225146 103685 round_trippers.go:469] Request Headers:
I1216 19:56:06.225157 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:06.225161 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:06.228388 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:06.229443 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:06.229462 103685 round_trippers.go:469] Request Headers:
I1216 19:56:06.229471 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:06.229477 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:06.232251 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:06.724486 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:06.724508 103685 round_trippers.go:469] Request Headers:
I1216 19:56:06.724522 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:06.724526 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:06.727728 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:06.728583 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:06.728601 103685 round_trippers.go:469] Request Headers:
I1216 19:56:06.728610 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:06.728615 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:06.732980 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:07.225410 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:07.225431 103685 round_trippers.go:469] Request Headers:
I1216 19:56:07.225442 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:07.225447 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:07.228505 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:07.229334 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:07.229355 103685 round_trippers.go:469] Request Headers:
I1216 19:56:07.229364 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:07.229370 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:07.232170 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:07.232728 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:07.725021 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:07.725045 103685 round_trippers.go:469] Request Headers:
I1216 19:56:07.725054 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:07.725059 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:07.732272 103685 round_trippers.go:574] Response Status: 200 OK in 7 milliseconds
I1216 19:56:07.733238 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:07.733260 103685 round_trippers.go:469] Request Headers:
I1216 19:56:07.733270 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:07.733277 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:07.736568 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:08.224912 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:08.224936 103685 round_trippers.go:469] Request Headers:
I1216 19:56:08.224946 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:08.224951 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:08.228358 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:08.229669 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:08.229690 103685 round_trippers.go:469] Request Headers:
I1216 19:56:08.229700 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:08.229705 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:08.232601 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:08.724430 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:08.724456 103685 round_trippers.go:469] Request Headers:
I1216 19:56:08.724467 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:08.724476 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:08.727791 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:08.729191 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:08.729215 103685 round_trippers.go:469] Request Headers:
I1216 19:56:08.729225 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:08.729230 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:08.739485 103685 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I1216 19:56:09.225361 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:09.225401 103685 round_trippers.go:469] Request Headers:
I1216 19:56:09.225415 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:09.225421 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:09.230915 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:56:09.240924 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:09.240950 103685 round_trippers.go:469] Request Headers:
I1216 19:56:09.240960 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:09.240971 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:09.250310 103685 round_trippers.go:574] Response Status: 200 OK in 9 milliseconds
I1216 19:56:09.250990 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:09.725171 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:09.725239 103685 round_trippers.go:469] Request Headers:
I1216 19:56:09.725264 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:09.725284 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:09.728894 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:09.730230 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:09.730299 103685 round_trippers.go:469] Request Headers:
I1216 19:56:09.730323 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:09.730342 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:09.740832 103685 round_trippers.go:574] Response Status: 200 OK in 10 milliseconds
I1216 19:56:10.225245 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:10.225267 103685 round_trippers.go:469] Request Headers:
I1216 19:56:10.225277 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:10.225281 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:10.229765 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:10.231122 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:10.231178 103685 round_trippers.go:469] Request Headers:
I1216 19:56:10.231211 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:10.231229 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:10.234188 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:10.725143 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:10.725215 103685 round_trippers.go:469] Request Headers:
I1216 19:56:10.725237 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:10.725265 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:10.729280 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:10.736975 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:10.737045 103685 round_trippers.go:469] Request Headers:
I1216 19:56:10.737070 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:10.737087 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:10.749891 103685 round_trippers.go:574] Response Status: 200 OK in 12 milliseconds
I1216 19:56:11.224785 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:11.224806 103685 round_trippers.go:469] Request Headers:
I1216 19:56:11.224816 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:11.224820 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:11.227897 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:11.228760 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:11.228813 103685 round_trippers.go:469] Request Headers:
I1216 19:56:11.228836 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:11.228855 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:11.231460 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:11.724708 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:11.724735 103685 round_trippers.go:469] Request Headers:
I1216 19:56:11.724743 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:11.724747 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:11.728919 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:11.730129 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:11.730151 103685 round_trippers.go:469] Request Headers:
I1216 19:56:11.730165 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:11.730173 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:11.734151 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:11.734687 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:12.225000 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:12.225032 103685 round_trippers.go:469] Request Headers:
I1216 19:56:12.225043 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:12.225047 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:12.227988 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:12.228784 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:12.228805 103685 round_trippers.go:469] Request Headers:
I1216 19:56:12.228815 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:12.228820 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:12.231696 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:12.724494 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:12.724517 103685 round_trippers.go:469] Request Headers:
I1216 19:56:12.724527 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:12.724531 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:12.727731 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:12.728784 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:12.728800 103685 round_trippers.go:469] Request Headers:
I1216 19:56:12.728808 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:12.728813 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:12.732751 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:13.224940 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:13.224965 103685 round_trippers.go:469] Request Headers:
I1216 19:56:13.224975 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:13.224979 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:13.228234 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:13.229084 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:13.229103 103685 round_trippers.go:469] Request Headers:
I1216 19:56:13.229112 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:13.229119 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:13.231882 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:13.724457 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:13.724480 103685 round_trippers.go:469] Request Headers:
I1216 19:56:13.724490 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:13.724493 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:13.727519 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:13.728335 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:13.728355 103685 round_trippers.go:469] Request Headers:
I1216 19:56:13.728365 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:13.728391 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:13.731971 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:14.224551 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:14.224574 103685 round_trippers.go:469] Request Headers:
I1216 19:56:14.224584 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:14.224588 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:14.227614 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:14.228376 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:14.228398 103685 round_trippers.go:469] Request Headers:
I1216 19:56:14.228408 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:14.228414 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:14.231233 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:14.232006 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:14.725360 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:14.725396 103685 round_trippers.go:469] Request Headers:
I1216 19:56:14.725407 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:14.725413 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:14.728382 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:14.729877 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:14.729894 103685 round_trippers.go:469] Request Headers:
I1216 19:56:14.729904 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:14.729909 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:14.735028 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:56:15.225207 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:15.225236 103685 round_trippers.go:469] Request Headers:
I1216 19:56:15.225246 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:15.225251 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:15.228353 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:15.229134 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:15.229151 103685 round_trippers.go:469] Request Headers:
I1216 19:56:15.229161 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:15.229165 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:15.232007 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:15.724472 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:15.724498 103685 round_trippers.go:469] Request Headers:
I1216 19:56:15.724508 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:15.724514 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:15.727828 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:15.728647 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:15.728682 103685 round_trippers.go:469] Request Headers:
I1216 19:56:15.728694 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:15.728703 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:15.731552 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:16.225281 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:16.225306 103685 round_trippers.go:469] Request Headers:
I1216 19:56:16.225316 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:16.225321 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:16.228384 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:16.229331 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:16.229352 103685 round_trippers.go:469] Request Headers:
I1216 19:56:16.229361 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:16.229367 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:16.232168 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:16.232808 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:16.724501 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:16.724527 103685 round_trippers.go:469] Request Headers:
I1216 19:56:16.724540 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:16.724544 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:16.727390 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:16.728122 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:16.728136 103685 round_trippers.go:469] Request Headers:
I1216 19:56:16.728145 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:16.728149 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:16.731646 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:17.224456 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:17.224478 103685 round_trippers.go:469] Request Headers:
I1216 19:56:17.224488 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:17.224492 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:17.227469 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:17.228184 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:17.228202 103685 round_trippers.go:469] Request Headers:
I1216 19:56:17.228211 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:17.228215 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:17.231017 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:17.725043 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:17.725066 103685 round_trippers.go:469] Request Headers:
I1216 19:56:17.725077 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:17.725081 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:17.728056 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:17.729693 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:17.729712 103685 round_trippers.go:469] Request Headers:
I1216 19:56:17.729721 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:17.729727 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:17.732700 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:18.225151 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:18.225183 103685 round_trippers.go:469] Request Headers:
I1216 19:56:18.225194 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:18.225202 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:18.228046 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:18.228700 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:18.228710 103685 round_trippers.go:469] Request Headers:
I1216 19:56:18.228718 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:18.228725 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:18.231197 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:18.724405 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:18.724438 103685 round_trippers.go:469] Request Headers:
I1216 19:56:18.724448 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:18.724453 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:18.727355 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:18.728309 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:18.728329 103685 round_trippers.go:469] Request Headers:
I1216 19:56:18.728339 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:18.728344 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:18.731840 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:18.732750 103685 pod_ready.go:103] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"False"
I1216 19:56:19.225071 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:19.225096 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.225107 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.225111 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.228339 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:19.229131 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:19.229153 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.229162 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.229168 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.231684 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.232164 103685 pod_ready.go:93] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:19.232187 103685 pod_ready.go:82] duration metric: took 23.007975057s for pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
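The ~23 s of repeated GETs above are minikube's pod_ready helper polling the pod (and its hosting node) roughly every half second until the pod's Ready condition flips to True. A minimal client-go sketch of that polling pattern is shown below; waitPodReady and the 500 ms interval are illustrative assumptions, not minikube's actual code.

// Hedged sketch of the polling loop visible in the log above.
// waitPodReady is a hypothetical name; minikube's real helper lives in pod_ready.go.
package podwait

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitPodReady polls the pod every 500ms (matching the ~0.5s cadence in the
// log) until its Ready condition is True or the timeout expires.
func waitPodReady(ctx context.Context, cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		for _, c := range pod.Status.Conditions {
			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
				return nil
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("pod %s/%s not Ready after %v", ns, name, timeout)
}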
I1216 19:56:19.232203 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.232271 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m03
I1216 19:56:19.232281 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.232289 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.232296 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.234971 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.235843 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:56:19.235862 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.235872 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.235876 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.238222 103685 round_trippers.go:574] Response Status: 404 Not Found in 2 milliseconds
I1216 19:56:19.238362 103685 pod_ready.go:98] node "ha-082404-m03" hosting pod "kube-controller-manager-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:56:19.238380 103685 pod_ready.go:82] duration metric: took 6.167124ms for pod "kube-controller-manager-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
E1216 19:56:19.238397 103685 pod_ready.go:67] WaitExtra: waitPodCondition: node "ha-082404-m03" hosting pod "kube-controller-manager-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:56:19.238415 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-kr525" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.238483 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-kr525
I1216 19:56:19.238491 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.238498 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.238503 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.241369 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.242179 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:56:19.242205 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.242213 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.242244 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.244668 103685 round_trippers.go:574] Response Status: 404 Not Found in 2 milliseconds
I1216 19:56:19.244822 103685 pod_ready.go:98] node "ha-082404-m03" hosting pod "kube-proxy-kr525" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:56:19.244845 103685 pod_ready.go:82] duration metric: took 6.419318ms for pod "kube-proxy-kr525" in "kube-system" namespace to be "Ready" ...
E1216 19:56:19.244855 103685 pod_ready.go:67] WaitExtra: waitPodCondition: node "ha-082404-m03" hosting pod "kube-proxy-kr525" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:56:19.244864 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-pvlrj" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.244932 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-pvlrj
I1216 19:56:19.244943 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.244952 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.244956 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.247831 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.248414 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m04
I1216 19:56:19.248432 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.248441 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.248445 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.251092 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.251605 103685 pod_ready.go:93] pod "kube-proxy-pvlrj" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:19.251625 103685 pod_ready.go:82] duration metric: took 6.748556ms for pod "kube-proxy-pvlrj" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.251639 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-wmg6k" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.251702 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-wmg6k
I1216 19:56:19.251714 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.251732 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.251740 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.254304 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.255206 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:19.255228 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.255238 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.255242 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.257912 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.258496 103685 pod_ready.go:93] pod "kube-proxy-wmg6k" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:19.258516 103685 pod_ready.go:82] duration metric: took 6.867379ms for pod "kube-proxy-wmg6k" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.258548 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-x7xbp" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.425919 103685 request.go:632] Waited for 167.283867ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-x7xbp
I1216 19:56:19.425991 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-x7xbp
I1216 19:56:19.426001 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.426011 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.426017 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.428924 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:19.625370 103685 request.go:632] Waited for 195.778885ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:19.625466 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:19.625482 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.625496 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.625510 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.631040 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:56:19.631645 103685 pod_ready.go:93] pod "kube-proxy-x7xbp" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:19.631667 103685 pod_ready.go:82] duration metric: took 373.106318ms for pod "kube-proxy-x7xbp" in "kube-system" namespace to be "Ready" ...
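The "Waited for ... due to client-side throttling, not priority and fairness" messages above come from client-go's per-client rate limiter, which queues requests once the configured QPS/Burst budget is exhausted. The sketch below shows where that limiter is configured; the QPS and Burst values are made-up illustrations, not minikube's actual settings.

// Hedged sketch: lowering QPS/Burst on a rest.Config makes bursts of GETs
// queue up client-side, producing wait messages like those in the log.
package throttledemo

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// newThrottledClient builds a clientset limited to qps requests/second with
// the given burst; excess requests block, and client-go logs the wait when
// it exceeds its internal reporting threshold.
func newThrottledClient(kubeconfig string, qps float32, burst int) (*kubernetes.Clientset, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	cfg.QPS = qps     // e.g. 5 requests/second (illustrative)
	cfg.Burst = burst // e.g. 10 requests allowed before throttling (illustrative)
	return kubernetes.NewForConfig(cfg)
}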
I1216 19:56:19.631680 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:19.825646 103685 request.go:632] Waited for 193.899211ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404
I1216 19:56:19.825741 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404
I1216 19:56:19.825755 103685 round_trippers.go:469] Request Headers:
I1216 19:56:19.825764 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:19.825769 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:19.828718 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:20.025994 103685 request.go:632] Waited for 196.3119ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:20.026066 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:20.026077 103685 round_trippers.go:469] Request Headers:
I1216 19:56:20.026087 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:20.026091 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:20.029501 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:20.030116 103685 pod_ready.go:93] pod "kube-scheduler-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:20.030141 103685 pod_ready.go:82] duration metric: took 398.454203ms for pod "kube-scheduler-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:20.030155 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:20.226027 103685 request.go:632] Waited for 195.806039ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404-m02
I1216 19:56:20.226101 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404-m02
I1216 19:56:20.226113 103685 round_trippers.go:469] Request Headers:
I1216 19:56:20.226122 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:20.226141 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:20.229284 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:20.425562 103685 request.go:632] Waited for 195.610328ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:20.425645 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:20.425658 103685 round_trippers.go:469] Request Headers:
I1216 19:56:20.425668 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:20.425672 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:20.428627 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:20.429307 103685 pod_ready.go:93] pod "kube-scheduler-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:20.429324 103685 pod_ready.go:82] duration metric: took 399.161037ms for pod "kube-scheduler-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:20.429338 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
I1216 19:56:20.625716 103685 request.go:632] Waited for 196.314792ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404-m03
I1216 19:56:20.625786 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404-m03
I1216 19:56:20.625796 103685 round_trippers.go:469] Request Headers:
I1216 19:56:20.625805 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:20.625814 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:20.628862 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:20.825815 103685 request.go:632] Waited for 196.311075ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:56:20.825898 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m03
I1216 19:56:20.825905 103685 round_trippers.go:469] Request Headers:
I1216 19:56:20.825918 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:20.825925 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:20.828557 103685 round_trippers.go:574] Response Status: 404 Not Found in 2 milliseconds
I1216 19:56:20.828703 103685 pod_ready.go:98] node "ha-082404-m03" hosting pod "kube-scheduler-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:56:20.828722 103685 pod_ready.go:82] duration metric: took 399.376884ms for pod "kube-scheduler-ha-082404-m03" in "kube-system" namespace to be "Ready" ...
E1216 19:56:20.828733 103685 pod_ready.go:67] WaitExtra: waitPodCondition: node "ha-082404-m03" hosting pod "kube-scheduler-ha-082404-m03" in "kube-system" namespace is currently not "Ready" (skipping!): error getting node "ha-082404-m03": nodes "ha-082404-m03" not found
I1216 19:56:20.828751 103685 pod_ready.go:39] duration metric: took 55.697255273s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1216 19:56:20.828780 103685 api_server.go:52] waiting for apiserver process to appear ...
I1216 19:56:20.828853 103685 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1216 19:56:20.840568 103685 api_server.go:72] duration metric: took 55.988577826s to wait for apiserver process to appear ...
I1216 19:56:20.840593 103685 api_server.go:88] waiting for apiserver healthz status ...
I1216 19:56:20.840624 103685 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I1216 19:56:20.849662 103685 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I1216 19:56:20.849758 103685 round_trippers.go:463] GET https://192.168.49.2:8443/version
I1216 19:56:20.849771 103685 round_trippers.go:469] Request Headers:
I1216 19:56:20.849781 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:20.849787 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:20.850906 103685 round_trippers.go:574] Response Status: 200 OK in 1 milliseconds
I1216 19:56:20.851036 103685 api_server.go:141] control plane version: v1.32.0
I1216 19:56:20.851056 103685 api_server.go:131] duration metric: took 10.455664ms to wait for apiserver health ...
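The health step above is a plain HTTPS GET to /healthz that expects the literal body "ok", followed by a GET to /version to read the control-plane version. A rough sketch of that probe follows; the InsecureSkipVerify transport is a simplification for illustration only, whereas minikube authenticates with the cluster's client certificates.

// Hedged sketch of an apiserver healthz probe like the one logged above.
package healthcheck

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

// apiserverHealthy GETs base+"/healthz" and succeeds only on a 200 response
// whose body is exactly "ok".
func apiserverHealthy(base string) error {
	client := &http.Client{Transport: &http.Transport{
		// Illustrative shortcut; real callers should verify the apiserver cert.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Get(base + "/healthz")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK || string(body) != "ok" {
		return fmt.Errorf("healthz returned %d: %s", resp.StatusCode, body)
	}
	return nil
}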
I1216 19:56:20.851066 103685 system_pods.go:43] waiting for kube-system pods to appear ...
I1216 19:56:21.025495 103685 request.go:632] Waited for 174.345221ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
I1216 19:56:21.025603 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
I1216 19:56:21.025619 103685 round_trippers.go:469] Request Headers:
I1216 19:56:21.025629 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:21.025633 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:21.031996 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:56:21.041434 103685 system_pods.go:59] 26 kube-system pods found
I1216 19:56:21.041474 103685 system_pods.go:61] "coredns-668d6bf9bc-9th4p" [56bab989-75df-426f-af86-73cef2741306] Running
I1216 19:56:21.041482 103685 system_pods.go:61] "coredns-668d6bf9bc-mwl2r" [84f8cad3-3121-4fae-83c0-9fe5c573d6d4] Running
I1216 19:56:21.041487 103685 system_pods.go:61] "etcd-ha-082404" [95cff35a-dcde-4bd9-89dd-05c7b42036cc] Running
I1216 19:56:21.041491 103685 system_pods.go:61] "etcd-ha-082404-m02" [e91189dc-b8a0-4ff9-ba7a-af12b90d0c82] Running
I1216 19:56:21.041495 103685 system_pods.go:61] "etcd-ha-082404-m03" [df692a92-f091-47fd-9a90-f007278dc5d4] Running
I1216 19:56:21.041576 103685 system_pods.go:61] "kindnet-8nzqx" [c062cfe1-2c57-4040-8d48-673a935f60f6] Running
I1216 19:56:21.041591 103685 system_pods.go:61] "kindnet-f7n6r" [22adac41-4ba2-4265-b6a1-f80addcffd92] Running
I1216 19:56:21.041596 103685 system_pods.go:61] "kindnet-m64xz" [ae1a3842-84ec-4be8-bb48-9ffa21435040] Running
I1216 19:56:21.041603 103685 system_pods.go:61] "kindnet-p6stw" [f4cb03ed-d63d-44a2-a22b-af8f0a23636c] Running
I1216 19:56:21.041607 103685 system_pods.go:61] "kube-apiserver-ha-082404" [cb879082-55e7-4825-ab02-f366c2f09a3d] Running
I1216 19:56:21.041611 103685 system_pods.go:61] "kube-apiserver-ha-082404-m02" [c4e969de-4014-401c-a809-c8f2f56815dd] Running
I1216 19:56:21.041615 103685 system_pods.go:61] "kube-apiserver-ha-082404-m03" [5d2a0021-3e6e-49ee-8b43-76f233c076c1] Running
I1216 19:56:21.041619 103685 system_pods.go:61] "kube-controller-manager-ha-082404" [1e745f98-ccc4-4511-8318-4e2456571628] Running
I1216 19:56:21.041623 103685 system_pods.go:61] "kube-controller-manager-ha-082404-m02" [2996b9f3-2c14-4864-9e4d-82d58685df57] Running
I1216 19:56:21.041628 103685 system_pods.go:61] "kube-controller-manager-ha-082404-m03" [7d94a045-a18c-4f87-a069-f88908ce9428] Running
I1216 19:56:21.041632 103685 system_pods.go:61] "kube-proxy-kr525" [8b374900-b35c-42e1-8757-ce142b1cf04d] Running
I1216 19:56:21.041645 103685 system_pods.go:61] "kube-proxy-pvlrj" [d5fc0309-78bb-42b3-a61f-82c5d4d9069e] Running
I1216 19:56:21.041653 103685 system_pods.go:61] "kube-proxy-wmg6k" [6d50b21a-c351-47e2-9abd-9fcca1423aff] Running
I1216 19:56:21.041657 103685 system_pods.go:61] "kube-proxy-x7xbp" [ce0d4ca6-fbc9-4f2f-996d-5bd01b41a14f] Running
I1216 19:56:21.041661 103685 system_pods.go:61] "kube-scheduler-ha-082404" [acddb3d3-c314-439a-92db-316e5150ca22] Running
I1216 19:56:21.041667 103685 system_pods.go:61] "kube-scheduler-ha-082404-m02" [3f0e8aae-a325-49d7-b616-4aee03dcca94] Running
I1216 19:56:21.041671 103685 system_pods.go:61] "kube-scheduler-ha-082404-m03" [71f272ed-20a7-4ed3-a16b-7622af2210a2] Running
I1216 19:56:21.041678 103685 system_pods.go:61] "kube-vip-ha-082404" [c70c2ea8-8fce-4883-b4bd-ac4b0f3a285d] Running
I1216 19:56:21.041683 103685 system_pods.go:61] "kube-vip-ha-082404-m02" [d6f98a08-2873-48c7-9fd3-2b4b5cfb6154] Running
I1216 19:56:21.041686 103685 system_pods.go:61] "kube-vip-ha-082404-m03" [de516177-52d7-4f79-9681-8090670d31da] Running
I1216 19:56:21.041689 103685 system_pods.go:61] "storage-provisioner" [3c0d0135-4746-4b03-9877-d30c5297116e] Running
I1216 19:56:21.041696 103685 system_pods.go:74] duration metric: took 190.623372ms to wait for pod list to return data ...
I1216 19:56:21.041708 103685 default_sa.go:34] waiting for default service account to be created ...
I1216 19:56:21.226129 103685 request.go:632] Waited for 184.332455ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/default/serviceaccounts
I1216 19:56:21.226197 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/default/serviceaccounts
I1216 19:56:21.226204 103685 round_trippers.go:469] Request Headers:
I1216 19:56:21.226213 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:21.226217 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:21.229666 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:21.229999 103685 default_sa.go:45] found service account: "default"
I1216 19:56:21.230023 103685 default_sa.go:55] duration metric: took 188.308102ms for default service account to be created ...
I1216 19:56:21.230034 103685 system_pods.go:116] waiting for k8s-apps to be running ...
I1216 19:56:21.425461 103685 request.go:632] Waited for 195.361083ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
I1216 19:56:21.425540 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
I1216 19:56:21.425553 103685 round_trippers.go:469] Request Headers:
I1216 19:56:21.425562 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:21.425570 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:21.431552 103685 round_trippers.go:574] Response Status: 200 OK in 5 milliseconds
I1216 19:56:21.441543 103685 system_pods.go:86] 26 kube-system pods found
I1216 19:56:21.441584 103685 system_pods.go:89] "coredns-668d6bf9bc-9th4p" [56bab989-75df-426f-af86-73cef2741306] Running
I1216 19:56:21.441593 103685 system_pods.go:89] "coredns-668d6bf9bc-mwl2r" [84f8cad3-3121-4fae-83c0-9fe5c573d6d4] Running
I1216 19:56:21.441598 103685 system_pods.go:89] "etcd-ha-082404" [95cff35a-dcde-4bd9-89dd-05c7b42036cc] Running
I1216 19:56:21.441603 103685 system_pods.go:89] "etcd-ha-082404-m02" [e91189dc-b8a0-4ff9-ba7a-af12b90d0c82] Running
I1216 19:56:21.441608 103685 system_pods.go:89] "etcd-ha-082404-m03" [df692a92-f091-47fd-9a90-f007278dc5d4] Running
I1216 19:56:21.441612 103685 system_pods.go:89] "kindnet-8nzqx" [c062cfe1-2c57-4040-8d48-673a935f60f6] Running
I1216 19:56:21.441616 103685 system_pods.go:89] "kindnet-f7n6r" [22adac41-4ba2-4265-b6a1-f80addcffd92] Running
I1216 19:56:21.441620 103685 system_pods.go:89] "kindnet-m64xz" [ae1a3842-84ec-4be8-bb48-9ffa21435040] Running
I1216 19:56:21.441625 103685 system_pods.go:89] "kindnet-p6stw" [f4cb03ed-d63d-44a2-a22b-af8f0a23636c] Running
I1216 19:56:21.441629 103685 system_pods.go:89] "kube-apiserver-ha-082404" [cb879082-55e7-4825-ab02-f366c2f09a3d] Running
I1216 19:56:21.441641 103685 system_pods.go:89] "kube-apiserver-ha-082404-m02" [c4e969de-4014-401c-a809-c8f2f56815dd] Running
I1216 19:56:21.441645 103685 system_pods.go:89] "kube-apiserver-ha-082404-m03" [5d2a0021-3e6e-49ee-8b43-76f233c076c1] Running
I1216 19:56:21.441649 103685 system_pods.go:89] "kube-controller-manager-ha-082404" [1e745f98-ccc4-4511-8318-4e2456571628] Running
I1216 19:56:21.441654 103685 system_pods.go:89] "kube-controller-manager-ha-082404-m02" [2996b9f3-2c14-4864-9e4d-82d58685df57] Running
I1216 19:56:21.441670 103685 system_pods.go:89] "kube-controller-manager-ha-082404-m03" [7d94a045-a18c-4f87-a069-f88908ce9428] Running
I1216 19:56:21.441675 103685 system_pods.go:89] "kube-proxy-kr525" [8b374900-b35c-42e1-8757-ce142b1cf04d] Running
I1216 19:56:21.441679 103685 system_pods.go:89] "kube-proxy-pvlrj" [d5fc0309-78bb-42b3-a61f-82c5d4d9069e] Running
I1216 19:56:21.441684 103685 system_pods.go:89] "kube-proxy-wmg6k" [6d50b21a-c351-47e2-9abd-9fcca1423aff] Running
I1216 19:56:21.441687 103685 system_pods.go:89] "kube-proxy-x7xbp" [ce0d4ca6-fbc9-4f2f-996d-5bd01b41a14f] Running
I1216 19:56:21.441692 103685 system_pods.go:89] "kube-scheduler-ha-082404" [acddb3d3-c314-439a-92db-316e5150ca22] Running
I1216 19:56:21.441696 103685 system_pods.go:89] "kube-scheduler-ha-082404-m02" [3f0e8aae-a325-49d7-b616-4aee03dcca94] Running
I1216 19:56:21.441701 103685 system_pods.go:89] "kube-scheduler-ha-082404-m03" [71f272ed-20a7-4ed3-a16b-7622af2210a2] Running
I1216 19:56:21.441715 103685 system_pods.go:89] "kube-vip-ha-082404" [c70c2ea8-8fce-4883-b4bd-ac4b0f3a285d] Running
I1216 19:56:21.441728 103685 system_pods.go:89] "kube-vip-ha-082404-m02" [d6f98a08-2873-48c7-9fd3-2b4b5cfb6154] Running
I1216 19:56:21.441738 103685 system_pods.go:89] "kube-vip-ha-082404-m03" [de516177-52d7-4f79-9681-8090670d31da] Running
I1216 19:56:21.441750 103685 system_pods.go:89] "storage-provisioner" [3c0d0135-4746-4b03-9877-d30c5297116e] Running
I1216 19:56:21.441761 103685 system_pods.go:126] duration metric: took 211.720634ms to wait for k8s-apps to be running ...
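The "k8s-apps running" wait above boils down to a single list of kube-system pods followed by a phase check on each entry. A minimal client-go sketch of that check, assuming the kubeconfig path that appears later in this log; this is an illustration, not minikube's actual system_pods.go:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path taken from the loader.go line later in this log; adjust as needed.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/jenkins/minikube-integration/20091-2258/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same request as the GET /api/v1/namespaces/kube-system/pods calls above.
	pods, err := cs.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%q Running=%v\n", p.Name, p.Status.Phase == corev1.PodRunning)
	}
}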
I1216 19:56:21.441770 103685 system_svc.go:44] waiting for kubelet service to be running ....
I1216 19:56:21.441956 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1216 19:56:21.454732 103685 system_svc.go:56] duration metric: took 12.954103ms WaitForService to wait for kubelet
I1216 19:56:21.454802 103685 kubeadm.go:582] duration metric: took 56.602815114s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1216 19:56:21.454828 103685 node_conditions.go:102] verifying NodePressure condition ...
I1216 19:56:21.625163 103685 request.go:632] Waited for 170.24218ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes
I1216 19:56:21.625242 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes
I1216 19:56:21.625253 103685 round_trippers.go:469] Request Headers:
I1216 19:56:21.625263 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:21.625267 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:21.631484 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:56:21.632990 103685 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1216 19:56:21.633025 103685 node_conditions.go:123] node cpu capacity is 2
I1216 19:56:21.633038 103685 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1216 19:56:21.633044 103685 node_conditions.go:123] node cpu capacity is 2
I1216 19:56:21.633049 103685 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1216 19:56:21.633077 103685 node_conditions.go:123] node cpu capacity is 2
I1216 19:56:21.633091 103685 node_conditions.go:105] duration metric: took 178.255363ms to run NodePressure ...
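The NodePressure step reads each node's capacity straight off the Node objects (the 203034800Ki ephemeral-storage and 2-CPU figures above). A hedged sketch of the same read with client-go, written as a standalone helper rather than minikube's node_conditions.go; the package and function names are illustrative:

package nodecheck

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printNodeCapacity lists all nodes and reports the two capacity fields checked above.
func printNodeCapacity(ctx context.Context, cs kubernetes.Interface) error {
	nodes, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, n := range nodes.Items {
		storage := n.Status.Capacity[corev1.ResourceEphemeralStorage]
		cpu := n.Status.Capacity[corev1.ResourceCPU]
		fmt.Printf("node %s: ephemeral-storage=%s cpu=%s\n", n.Name, storage.String(), cpu.String())
	}
	return nil
}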
I1216 19:56:21.633104 103685 start.go:241] waiting for startup goroutines ...
I1216 19:56:21.633132 103685 start.go:255] writing updated cluster config ...
I1216 19:56:21.636346 103685 out.go:201]
I1216 19:56:21.639287 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:56:21.639451 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:56:21.642484 103685 out.go:177] * Starting "ha-082404-m04" worker node in "ha-082404" cluster
I1216 19:56:21.645892 103685 cache.go:121] Beginning downloading kic base image for docker with docker
I1216 19:56:21.648455 103685 out.go:177] * Pulling base image v0.0.45-1734029593-20090 ...
I1216 19:56:21.651018 103685 preload.go:131] Checking if preload exists for k8s version v1.32.0 and runtime docker
I1216 19:56:21.651050 103685 cache.go:56] Caching tarball of preloaded images
I1216 19:56:21.651093 103685 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon
I1216 19:56:21.651158 103685 preload.go:172] Found /home/jenkins/minikube-integration/20091-2258/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.32.0-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I1216 19:56:21.651169 103685 cache.go:59] Finished verifying existence of preloaded tar for v1.32.0 on docker
I1216 19:56:21.651307 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:56:21.672354 103685 image.go:98] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon, skipping pull
I1216 19:56:21.672374 103685 cache.go:144] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 exists in daemon, skipping load
I1216 19:56:21.672387 103685 cache.go:194] Successfully downloaded all kic artifacts
I1216 19:56:21.672411 103685 start.go:360] acquireMachinesLock for ha-082404-m04: {Name:mkbfef421b2e38a6e5e4a7c28eb280c84a721335 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1216 19:56:21.672578 103685 start.go:364] duration metric: took 92.945µs to acquireMachinesLock for "ha-082404-m04"
I1216 19:56:21.672632 103685 start.go:96] Skipping create...Using existing machine configuration
I1216 19:56:21.672638 103685 fix.go:54] fixHost starting: m04
I1216 19:56:21.673035 103685 cli_runner.go:164] Run: docker container inspect ha-082404-m04 --format={{.State.Status}}
I1216 19:56:21.690725 103685 fix.go:112] recreateIfNeeded on ha-082404-m04: state=Stopped err=<nil>
W1216 19:56:21.690752 103685 fix.go:138] unexpected machine state, will restart: <nil>
I1216 19:56:21.693800 103685 out.go:177] * Restarting existing docker container for "ha-082404-m04" ...
I1216 19:56:21.696457 103685 cli_runner.go:164] Run: docker start ha-082404-m04
I1216 19:56:22.039396 103685 cli_runner.go:164] Run: docker container inspect ha-082404-m04 --format={{.State.Status}}
I1216 19:56:22.063250 103685 kic.go:430] container "ha-082404-m04" state is running.
I1216 19:56:22.063644 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404-m04
I1216 19:56:22.087920 103685 profile.go:143] Saving config to /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/config.json ...
I1216 19:56:22.088171 103685 machine.go:93] provisionDockerMachine start ...
I1216 19:56:22.088231 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:22.111363 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:56:22.111806 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32838 <nil> <nil>}
I1216 19:56:22.111816 103685 main.go:141] libmachine: About to run SSH command:
hostname
I1216 19:56:22.112722 103685 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: EOF
I1216 19:56:25.277547 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-082404-m04
I1216 19:56:25.277573 103685 ubuntu.go:169] provisioning hostname "ha-082404-m04"
I1216 19:56:25.277636 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:25.298920 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:56:25.299177 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32838 <nil> <nil>}
I1216 19:56:25.299195 103685 main.go:141] libmachine: About to run SSH command:
sudo hostname ha-082404-m04 && echo "ha-082404-m04" | sudo tee /etc/hostname
I1216 19:56:25.480137 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: ha-082404-m04
I1216 19:56:25.480216 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:25.498886 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:56:25.499131 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32838 <nil> <nil>}
I1216 19:56:25.499154 103685 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\sha-082404-m04' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 ha-082404-m04/g' /etc/hosts;
else
echo '127.0.1.1 ha-082404-m04' | sudo tee -a /etc/hosts;
fi
fi
I1216 19:56:25.650093 103685 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1216 19:56:25.650174 103685 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20091-2258/.minikube CaCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20091-2258/.minikube}
I1216 19:56:25.650212 103685 ubuntu.go:177] setting up certificates
I1216 19:56:25.650241 103685 provision.go:84] configureAuth start
I1216 19:56:25.650320 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404-m04
I1216 19:56:25.669571 103685 provision.go:143] copyHostCerts
I1216 19:56:25.669608 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem
I1216 19:56:25.669640 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem, removing ...
I1216 19:56:25.669646 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem
I1216 19:56:25.669798 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/ca.pem (1082 bytes)
I1216 19:56:25.669984 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem
I1216 19:56:25.670007 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem, removing ...
I1216 19:56:25.670012 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem
I1216 19:56:25.670108 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/cert.pem (1123 bytes)
I1216 19:56:25.670248 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem -> /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem
I1216 19:56:25.670276 103685 exec_runner.go:144] found /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem, removing ...
I1216 19:56:25.670281 103685 exec_runner.go:203] rm: /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem
I1216 19:56:25.670397 103685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20091-2258/.minikube/key.pem (1675 bytes)
I1216 19:56:25.670502 103685 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem org=jenkins.ha-082404-m04 san=[127.0.0.1 192.168.49.5 ha-082404-m04 localhost minikube]
I1216 19:56:25.951558 103685 provision.go:177] copyRemoteCerts
I1216 19:56:25.951696 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1216 19:56:25.951768 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:25.978960 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32838 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m04/id_rsa Username:docker}
I1216 19:56:26.088131 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem -> /etc/docker/ca.pem
I1216 19:56:26.088197 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1216 19:56:26.116742 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem -> /etc/docker/server.pem
I1216 19:56:26.116808 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I1216 19:56:26.143381 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem -> /etc/docker/server-key.pem
I1216 19:56:26.143448 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I1216 19:56:26.171661 103685 provision.go:87] duration metric: took 521.391251ms to configureAuth
I1216 19:56:26.171689 103685 ubuntu.go:193] setting minikube options for container-runtime
I1216 19:56:26.171926 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:56:26.171989 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:26.192881 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:56:26.193116 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32838 <nil> <nil>}
I1216 19:56:26.193128 103685 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I1216 19:56:26.343116 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I1216 19:56:26.343140 103685 ubuntu.go:71] root file system type: overlay
I1216 19:56:26.343297 103685 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I1216 19:56:26.343369 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:26.362478 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:56:26.362828 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32838 <nil> <nil>}
I1216 19:56:26.362919 103685 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment="NO_PROXY=192.168.49.2"
Environment="NO_PROXY=192.168.49.2,192.168.49.3"
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I1216 19:56:26.528065 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
Environment=NO_PROXY=192.168.49.2
Environment=NO_PROXY=192.168.49.2,192.168.49.3
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I1216 19:56:26.528244 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:26.547382 103685 main.go:141] libmachine: Using SSH client type: native
I1216 19:56:26.547644 103685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 32838 <nil> <nil>}
I1216 19:56:26.547668 103685 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I1216 19:56:27.503593 103685 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-12-16 19:53:45.448092769 +0000
+++ /lib/systemd/system/docker.service.new 2024-12-16 19:56:26.523170095 +0000
@@ -14,7 +14,6 @@
Environment=NO_PROXY=192.168.49.2
Environment=NO_PROXY=192.168.49.2,192.168.49.3
-Environment=NO_PROXY=192.168.49.2,192.168.49.3,192.168.49.4
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I1216 19:56:27.503684 103685 machine.go:96] duration metric: took 5.41550101s to provisionDockerMachine
I1216 19:56:27.503712 103685 start.go:293] postStartSetup for "ha-082404-m04" (driver="docker")
I1216 19:56:27.503752 103685 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1216 19:56:27.503867 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1216 19:56:27.503936 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:27.526130 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32838 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m04/id_rsa Username:docker}
I1216 19:56:27.632347 103685 ssh_runner.go:195] Run: cat /etc/os-release
I1216 19:56:27.636258 103685 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1216 19:56:27.636292 103685 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1216 19:56:27.636303 103685 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1216 19:56:27.636310 103685 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1216 19:56:27.636320 103685 filesync.go:126] Scanning /home/jenkins/minikube-integration/20091-2258/.minikube/addons for local assets ...
I1216 19:56:27.636378 103685 filesync.go:126] Scanning /home/jenkins/minikube-integration/20091-2258/.minikube/files for local assets ...
I1216 19:56:27.636447 103685 filesync.go:149] local asset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> 75692.pem in /etc/ssl/certs
I1216 19:56:27.636454 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> /etc/ssl/certs/75692.pem
I1216 19:56:27.636554 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs
I1216 19:56:27.646385 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem --> /etc/ssl/certs/75692.pem (1708 bytes)
I1216 19:56:27.689390 103685 start.go:296] duration metric: took 185.636209ms for postStartSetup
I1216 19:56:27.689513 103685 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1216 19:56:27.689583 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:27.710027 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32838 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m04/id_rsa Username:docker}
I1216 19:56:27.815063 103685 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1216 19:56:27.819956 103685 fix.go:56] duration metric: took 6.147312259s for fixHost
I1216 19:56:27.819986 103685 start.go:83] releasing machines lock for "ha-082404-m04", held for 6.14736998s
I1216 19:56:27.820063 103685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-082404-m04
I1216 19:56:27.840254 103685 out.go:177] * Found network options:
I1216 19:56:27.842884 103685 out.go:177] - NO_PROXY=192.168.49.2,192.168.49.3
W1216 19:56:27.845382 103685 proxy.go:119] fail to check proxy env: Error ip not in block
W1216 19:56:27.845403 103685 proxy.go:119] fail to check proxy env: Error ip not in block
W1216 19:56:27.845426 103685 proxy.go:119] fail to check proxy env: Error ip not in block
W1216 19:56:27.845442 103685 proxy.go:119] fail to check proxy env: Error ip not in block
I1216 19:56:27.845515 103685 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1216 19:56:27.845560 103685 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1216 19:56:27.845617 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:27.845563 103685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-082404-m04
I1216 19:56:27.864348 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32838 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m04/id_rsa Username:docker}
I1216 19:56:27.867303 103685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32838 SSHKeyPath:/home/jenkins/minikube-integration/20091-2258/.minikube/machines/ha-082404-m04/id_rsa Username:docker}
I1216 19:56:28.105016 103685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I1216 19:56:28.128883 103685 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I1216 19:56:28.128969 103685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1216 19:56:28.139463 103685 cni.go:259] no active bridge cni configs found in "/etc/cni/net.d" - nothing to disable
I1216 19:56:28.139492 103685 start.go:495] detecting cgroup driver to use...
I1216 19:56:28.139530 103685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1216 19:56:28.139623 103685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I1216 19:56:28.157360 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I1216 19:56:28.168032 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I1216 19:56:28.178280 103685 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I1216 19:56:28.178373 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I1216 19:56:28.189758 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1216 19:56:28.201408 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I1216 19:56:28.211923 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I1216 19:56:28.233612 103685 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1216 19:56:28.245064 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I1216 19:56:28.255387 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I1216 19:56:28.265806 103685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I1216 19:56:28.276335 103685 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1216 19:56:28.292378 103685 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1216 19:56:28.301286 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:56:28.396540 103685 ssh_runner.go:195] Run: sudo systemctl restart containerd
I1216 19:56:28.510463 103685 start.go:495] detecting cgroup driver to use...
I1216 19:56:28.510535 103685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1216 19:56:28.510627 103685 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I1216 19:56:28.537200 103685 cruntime.go:279] skipping containerd shutdown because we are bound to it
I1216 19:56:28.537301 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I1216 19:56:28.553279 103685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I1216 19:56:28.575858 103685 ssh_runner.go:195] Run: which cri-dockerd
I1216 19:56:28.581378 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I1216 19:56:28.593589 103685 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I1216 19:56:28.619251 103685 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I1216 19:56:28.772603 103685 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I1216 19:56:28.900292 103685 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I1216 19:56:28.900376 103685 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I1216 19:56:28.933668 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:56:29.072729 103685 ssh_runner.go:195] Run: sudo systemctl restart docker
I1216 19:56:29.508427 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I1216 19:56:29.523686 103685 ssh_runner.go:195] Run: sudo systemctl stop cri-docker.socket
I1216 19:56:29.541797 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1216 19:56:29.555024 103685 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I1216 19:56:29.655142 103685 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I1216 19:56:29.756812 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:56:29.857268 103685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I1216 19:56:29.873344 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I1216 19:56:29.887508 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:56:29.996934 103685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I1216 19:56:30.146562 103685 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I1216 19:56:30.146647 103685 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I1216 19:56:30.152683 103685 start.go:563] Will wait 60s for crictl version
I1216 19:56:30.152762 103685 ssh_runner.go:195] Run: which crictl
I1216 19:56:30.158324 103685 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1216 19:56:30.205924 103685 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.4.0
RuntimeApiVersion: v1
I1216 19:56:30.206044 103685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1216 19:56:30.237938 103685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I1216 19:56:30.268323 103685 out.go:235] * Preparing Kubernetes v1.32.0 on Docker 27.4.0 ...
I1216 19:56:30.270925 103685 out.go:177] - env NO_PROXY=192.168.49.2
I1216 19:56:30.273691 103685 out.go:177] - env NO_PROXY=192.168.49.2,192.168.49.3
I1216 19:56:30.276272 103685 cli_runner.go:164] Run: docker network inspect ha-082404 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1216 19:56:30.297951 103685 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1216 19:56:30.302947 103685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1216 19:56:30.319975 103685 mustload.go:65] Loading cluster: ha-082404
I1216 19:56:30.320281 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:56:30.320640 103685 cli_runner.go:164] Run: docker container inspect ha-082404 --format={{.State.Status}}
I1216 19:56:30.340187 103685 host.go:66] Checking if "ha-082404" exists ...
I1216 19:56:30.340492 103685 certs.go:68] Setting up /home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404 for IP: 192.168.49.5
I1216 19:56:30.340508 103685 certs.go:194] generating shared ca certs ...
I1216 19:56:30.340525 103685 certs.go:226] acquiring lock for ca certs: {Name:mk61ac4ce13eccd2c732f8ba869cb043f9f7a744 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1216 19:56:30.340650 103685 certs.go:235] skipping valid "minikubeCA" ca cert: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key
I1216 19:56:30.340696 103685 certs.go:235] skipping valid "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key
I1216 19:56:30.340711 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt -> /var/lib/minikube/certs/ca.crt
I1216 19:56:30.340726 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key -> /var/lib/minikube/certs/ca.key
I1216 19:56:30.340743 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.crt -> /var/lib/minikube/certs/proxy-client-ca.crt
I1216 19:56:30.340757 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key -> /var/lib/minikube/certs/proxy-client-ca.key
I1216 19:56:30.340824 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem (1338 bytes)
W1216 19:56:30.340862 103685 certs.go:480] ignoring /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569_empty.pem, impossibly tiny 0 bytes
I1216 19:56:30.340877 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca-key.pem (1679 bytes)
I1216 19:56:30.340906 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/ca.pem (1082 bytes)
I1216 19:56:30.340935 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/cert.pem (1123 bytes)
I1216 19:56:30.340962 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/key.pem (1675 bytes)
I1216 19:56:30.341022 103685 certs.go:484] found cert: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem (1708 bytes)
I1216 19:56:30.341061 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem -> /usr/share/ca-certificates/75692.pem
I1216 19:56:30.341082 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt -> /usr/share/ca-certificates/minikubeCA.pem
I1216 19:56:30.341101 103685 vm_assets.go:164] NewFileAsset: /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem -> /usr/share/ca-certificates/7569.pem
I1216 19:56:30.341130 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1216 19:56:30.373731 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1216 19:56:30.399959 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1216 19:56:30.424509 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1216 19:56:30.450525 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/files/etc/ssl/certs/75692.pem --> /usr/share/ca-certificates/75692.pem (1708 bytes)
I1216 19:56:30.478017 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1216 19:56:30.502499 103685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20091-2258/.minikube/certs/7569.pem --> /usr/share/ca-certificates/7569.pem (1338 bytes)
I1216 19:56:30.529293 103685 ssh_runner.go:195] Run: openssl version
I1216 19:56:30.535068 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/75692.pem && ln -fs /usr/share/ca-certificates/75692.pem /etc/ssl/certs/75692.pem"
I1216 19:56:30.544829 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/75692.pem
I1216 19:56:30.548643 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Dec 16 19:42 /usr/share/ca-certificates/75692.pem
I1216 19:56:30.548736 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/75692.pem
I1216 19:56:30.555655 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/75692.pem /etc/ssl/certs/3ec20f2e.0"
I1216 19:56:30.565240 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1216 19:56:30.575984 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1216 19:56:30.579780 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 16 19:35 /usr/share/ca-certificates/minikubeCA.pem
I1216 19:56:30.579853 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1216 19:56:30.587436 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I1216 19:56:30.596934 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/7569.pem && ln -fs /usr/share/ca-certificates/7569.pem /etc/ssl/certs/7569.pem"
I1216 19:56:30.607031 103685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/7569.pem
I1216 19:56:30.610989 103685 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Dec 16 19:42 /usr/share/ca-certificates/7569.pem
I1216 19:56:30.611094 103685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/7569.pem
I1216 19:56:30.618258 103685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/7569.pem /etc/ssl/certs/51391683.0"
I1216 19:56:30.627658 103685 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1216 19:56:30.631321 103685 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1216 19:56:30.631362 103685 kubeadm.go:934] updating node {m04 192.168.49.5 0 v1.32.0 false true} ...
I1216 19:56:30.631453 103685 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.32.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=ha-082404-m04 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.5
[Install]
config:
{KubernetesVersion:v1.32.0 ClusterName:ha-082404 Namespace:default APIServerHAVIP:192.168.49.254 APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1216 19:56:30.631526 103685 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.32.0
I1216 19:56:30.641050 103685 binaries.go:44] Found k8s binaries, skipping transfer
I1216 19:56:30.641174 103685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system
I1216 19:56:30.650273 103685 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I1216 19:56:30.674390 103685 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1216 19:56:30.696364 103685 ssh_runner.go:195] Run: grep 192.168.49.254 control-plane.minikube.internal$ /etc/hosts
I1216 19:56:30.700898 103685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.254 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1216 19:56:30.712441 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:56:30.810974 103685 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1216 19:56:30.824547 103685 start.go:235] Will wait 6m0s for node &{Name:m04 IP:192.168.49.5 Port:0 KubernetesVersion:v1.32.0 ContainerRuntime: ControlPlane:false Worker:true}
I1216 19:56:30.824958 103685 config.go:182] Loaded profile config "ha-082404": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.32.0
I1216 19:56:30.827965 103685 out.go:177] * Verifying Kubernetes components...
I1216 19:56:30.830655 103685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1216 19:56:30.916428 103685 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1216 19:56:30.928719 103685 loader.go:395] Config loaded from file: /home/jenkins/minikube-integration/20091-2258/kubeconfig
I1216 19:56:30.928989 103685 kapi.go:59] client config for ha-082404: &rest.Config{Host:"https://192.168.49.254:8443", APIPath:"", ContentConfig:rest.ContentConfig{AcceptContentTypes:"", ContentType:"", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"", Password:"", BearerToken:"", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"", UID:"", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:<nil>, AuthConfigPersister:rest.AuthProviderConfigPersister(nil), ExecProvider:<nil>, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.crt", KeyFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/profiles/ha-082404/client.key", CAFile:"/home/jenkins/minikube-integration/20091-2258/.minikube/ca.crt", CertData:[]uint8(nil), KeyData:[]uint8(nil), CAData:[]uint8(nil), NextProtos:[]string(nil)},
UserAgent:"", DisableCompression:false, Transport:http.RoundTripper(nil), WrapTransport:(transport.WrapperFunc)(0x1eafe20), QPS:0, Burst:0, RateLimiter:flowcontrol.RateLimiter(nil), WarningHandler:rest.WarningHandler(nil), Timeout:0, Dial:(func(context.Context, string, string) (net.Conn, error))(nil), Proxy:(func(*http.Request) (*url.URL, error))(nil)}
W1216 19:56:30.929049 103685 kubeadm.go:483] Overriding stale ClientConfig host https://192.168.49.254:8443 with https://192.168.49.2:8443
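The recurring "Waited ... due to client-side throttling" lines are produced by client-go's default rate limiter: with QPS and Burst left at 0 in the rest.Config dump above, client-go falls back to 5 requests/second with a burst of 10. A short sketch of how a caller could raise those limits before building a clientset; the numbers and names here are illustrative, not what minikube configures:

package fastclient

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newClient raises the client-side rate limits so bursts of GETs (like the
// per-pod readiness checks below) are not queued by the default limiter.
func newClient(cfg *rest.Config) (*kubernetes.Clientset, error) {
	cfg.QPS = 50    // client-go defaults to 5 when this is left at 0
	cfg.Burst = 100 // client-go defaults to 10 when this is left at 0
	return kubernetes.NewForConfig(cfg)
}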
I1216 19:56:30.929250 103685 node_ready.go:35] waiting up to 6m0s for node "ha-082404-m04" to be "Ready" ...
I1216 19:56:30.929321 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m04
I1216 19:56:30.929333 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.929341 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.929350 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.932322 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.932850 103685 node_ready.go:49] node "ha-082404-m04" has status "Ready":"True"
I1216 19:56:30.932872 103685 node_ready.go:38] duration metric: took 3.605533ms for node "ha-082404-m04" to be "Ready" ...
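The node_ready wait above, and the pod_ready waits that follow, both reduce to fetching the object and looking for a Ready condition with status True. Two minimal predicates expressing those checks (illustrative helpers, not minikube's node_ready.go or pod_ready.go):

package readiness

import corev1 "k8s.io/api/core/v1"

// nodeIsReady mirrors the node has status "Ready":"True" check above.
func nodeIsReady(n corev1.Node) bool {
	for _, c := range n.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

// podIsReady mirrors the pod has status "Ready":"True" checks below.
func podIsReady(p corev1.Pod) bool {
	for _, c := range p.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}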
I1216 19:56:30.932884 103685 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1216 19:56:30.932955 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods
I1216 19:56:30.932966 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.932974 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.932978 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.937924 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:30.945060 103685 pod_ready.go:79] waiting up to 6m0s for pod "coredns-668d6bf9bc-9th4p" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.945203 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/coredns-668d6bf9bc-9th4p
I1216 19:56:30.945243 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.945263 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.945269 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.948167 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.949205 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:30.949226 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.949236 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.949240 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.951762 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.952566 103685 pod_ready.go:93] pod "coredns-668d6bf9bc-9th4p" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:30.952590 103685 pod_ready.go:82] duration metric: took 7.489401ms for pod "coredns-668d6bf9bc-9th4p" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.952603 103685 pod_ready.go:79] waiting up to 6m0s for pod "coredns-668d6bf9bc-mwl2r" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.952665 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/coredns-668d6bf9bc-mwl2r
I1216 19:56:30.952676 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.952684 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.952688 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.955524 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.956533 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:30.956552 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.956562 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.956567 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.959332 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.959840 103685 pod_ready.go:93] pod "coredns-668d6bf9bc-mwl2r" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:30.959861 103685 pod_ready.go:82] duration metric: took 7.251073ms for pod "coredns-668d6bf9bc-mwl2r" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.959874 103685 pod_ready.go:79] waiting up to 6m0s for pod "etcd-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.959944 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/etcd-ha-082404
I1216 19:56:30.959954 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.959965 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.959969 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.962649 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.963318 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:30.963337 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.963348 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.963355 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.965764 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.966478 103685 pod_ready.go:93] pod "etcd-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:30.966500 103685 pod_ready.go:82] duration metric: took 6.611412ms for pod "etcd-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.966515 103685 pod_ready.go:79] waiting up to 6m0s for pod "etcd-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.966628 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/etcd-ha-082404-m02
I1216 19:56:30.966639 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.966648 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.966655 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.969198 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.969950 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:30.969971 103685 round_trippers.go:469] Request Headers:
I1216 19:56:30.969980 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:30.969984 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:30.972357 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:30.973012 103685 pod_ready.go:93] pod "etcd-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:30.973032 103685 pod_ready.go:82] duration metric: took 6.484236ms for pod "etcd-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:30.973055 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:31.130296 103685 request.go:632] Waited for 157.170563ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404
I1216 19:56:31.130393 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404
I1216 19:56:31.130411 103685 round_trippers.go:469] Request Headers:
I1216 19:56:31.130425 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:31.130438 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:31.133748 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:31.330290 103685 request.go:632] Waited for 195.221105ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:31.330382 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:31.330395 103685 round_trippers.go:469] Request Headers:
I1216 19:56:31.330404 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:31.330409 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:31.333313 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:31.334067 103685 pod_ready.go:93] pod "kube-apiserver-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:31.334087 103685 pod_ready.go:82] duration metric: took 361.020589ms for pod "kube-apiserver-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:31.334100 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:31.530221 103685 request.go:632] Waited for 196.046032ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404-m02
I1216 19:56:31.530308 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-apiserver-ha-082404-m02
I1216 19:56:31.530337 103685 round_trippers.go:469] Request Headers:
I1216 19:56:31.530364 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:31.530377 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:31.534850 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:31.729520 103685 request.go:632] Waited for 192.172486ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:31.729609 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:31.729630 103685 round_trippers.go:469] Request Headers:
I1216 19:56:31.729639 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:31.729644 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:31.734424 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:31.735635 103685 pod_ready.go:93] pod "kube-apiserver-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:31.735658 103685 pod_ready.go:82] duration metric: took 401.549615ms for pod "kube-apiserver-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:31.735672 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:31.929593 103685 request.go:632] Waited for 193.855761ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:56:31.929652 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404
I1216 19:56:31.929665 103685 round_trippers.go:469] Request Headers:
I1216 19:56:31.929674 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:31.929689 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:31.932582 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:32.129682 103685 request.go:632] Waited for 196.303669ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:32.129758 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:32.129767 103685 round_trippers.go:469] Request Headers:
I1216 19:56:32.129775 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:32.129782 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:32.132627 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:32.133318 103685 pod_ready.go:93] pod "kube-controller-manager-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:32.133341 103685 pod_ready.go:82] duration metric: took 397.660252ms for pod "kube-controller-manager-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:32.133354 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:32.329739 103685 request.go:632] Waited for 196.317208ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:32.329924 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-controller-manager-ha-082404-m02
I1216 19:56:32.329937 103685 round_trippers.go:469] Request Headers:
I1216 19:56:32.329946 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:32.329951 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:32.332733 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:32.529889 103685 request.go:632] Waited for 196.345047ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:32.529950 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:32.529958 103685 round_trippers.go:469] Request Headers:
I1216 19:56:32.529968 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:32.529975 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:32.532947 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:32.533534 103685 pod_ready.go:93] pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:32.533558 103685 pod_ready.go:82] duration metric: took 400.195143ms for pod "kube-controller-manager-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:32.533572 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-pvlrj" in "kube-system" namespace to be "Ready" ...
I1216 19:56:32.729968 103685 request.go:632] Waited for 196.332805ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-pvlrj
I1216 19:56:32.730036 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-pvlrj
I1216 19:56:32.730047 103685 round_trippers.go:469] Request Headers:
I1216 19:56:32.730056 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:32.730061 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:32.736523 103685 round_trippers.go:574] Response Status: 200 OK in 6 milliseconds
I1216 19:56:32.929677 103685 request.go:632] Waited for 191.336116ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m04
I1216 19:56:32.929755 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m04
I1216 19:56:32.929768 103685 round_trippers.go:469] Request Headers:
I1216 19:56:32.929777 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:32.929783 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:32.932651 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:32.933187 103685 pod_ready.go:93] pod "kube-proxy-pvlrj" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:32.933204 103685 pod_ready.go:82] duration metric: took 399.624591ms for pod "kube-proxy-pvlrj" in "kube-system" namespace to be "Ready" ...
I1216 19:56:32.933217 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-wmg6k" in "kube-system" namespace to be "Ready" ...
I1216 19:56:33.130202 103685 request.go:632] Waited for 196.921444ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-wmg6k
I1216 19:56:33.130264 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-wmg6k
I1216 19:56:33.130274 103685 round_trippers.go:469] Request Headers:
I1216 19:56:33.130285 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:33.130294 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:33.133319 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:33.330212 103685 request.go:632] Waited for 196.149495ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:33.330270 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:33.330275 103685 round_trippers.go:469] Request Headers:
I1216 19:56:33.330285 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:33.330289 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:33.333034 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:33.333584 103685 pod_ready.go:93] pod "kube-proxy-wmg6k" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:33.333603 103685 pod_ready.go:82] duration metric: took 400.378734ms for pod "kube-proxy-wmg6k" in "kube-system" namespace to be "Ready" ...
I1216 19:56:33.333635 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-x7xbp" in "kube-system" namespace to be "Ready" ...
I1216 19:56:33.529578 103685 request.go:632] Waited for 195.874517ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-x7xbp
I1216 19:56:33.529657 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-proxy-x7xbp
I1216 19:56:33.529668 103685 round_trippers.go:469] Request Headers:
I1216 19:56:33.529684 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:33.529693 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:33.533017 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:33.730207 103685 request.go:632] Waited for 196.259532ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:33.730324 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:33.730355 103685 round_trippers.go:469] Request Headers:
I1216 19:56:33.730378 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:33.730396 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:33.733058 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:33.733569 103685 pod_ready.go:93] pod "kube-proxy-x7xbp" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:33.733584 103685 pod_ready.go:82] duration metric: took 399.940134ms for pod "kube-proxy-x7xbp" in "kube-system" namespace to be "Ready" ...
I1216 19:56:33.733595 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:33.929513 103685 request.go:632] Waited for 195.851822ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404
I1216 19:56:33.929572 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404
I1216 19:56:33.929596 103685 round_trippers.go:469] Request Headers:
I1216 19:56:33.929614 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:33.929623 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:33.932575 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:34.129623 103685 request.go:632] Waited for 196.140859ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:34.129677 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404
I1216 19:56:34.129687 103685 round_trippers.go:469] Request Headers:
I1216 19:56:34.129702 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:34.129725 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:34.134442 103685 round_trippers.go:574] Response Status: 200 OK in 4 milliseconds
I1216 19:56:34.135275 103685 pod_ready.go:93] pod "kube-scheduler-ha-082404" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:34.135297 103685 pod_ready.go:82] duration metric: took 401.693834ms for pod "kube-scheduler-ha-082404" in "kube-system" namespace to be "Ready" ...
I1216 19:56:34.135311 103685 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:34.329710 103685 request.go:632] Waited for 194.325683ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404-m02
I1216 19:56:34.329851 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/namespaces/kube-system/pods/kube-scheduler-ha-082404-m02
I1216 19:56:34.329885 103685 round_trippers.go:469] Request Headers:
I1216 19:56:34.329903 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:34.329909 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:34.333272 103685 round_trippers.go:574] Response Status: 200 OK in 3 milliseconds
I1216 19:56:34.530313 103685 request.go:632] Waited for 196.337523ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:34.530388 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes/ha-082404-m02
I1216 19:56:34.530402 103685 round_trippers.go:469] Request Headers:
I1216 19:56:34.530412 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:34.530422 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:34.533185 103685 round_trippers.go:574] Response Status: 200 OK in 2 milliseconds
I1216 19:56:34.533763 103685 pod_ready.go:93] pod "kube-scheduler-ha-082404-m02" in "kube-system" namespace has status "Ready":"True"
I1216 19:56:34.533786 103685 pod_ready.go:82] duration metric: took 398.4661ms for pod "kube-scheduler-ha-082404-m02" in "kube-system" namespace to be "Ready" ...
I1216 19:56:34.533801 103685 pod_ready.go:39] duration metric: took 3.600902057s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1216 19:56:34.533874 103685 system_svc.go:44] waiting for kubelet service to be running ....
I1216 19:56:34.533945 103685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1216 19:56:34.546365 103685 system_svc.go:56] duration metric: took 12.483958ms WaitForService to wait for kubelet
I1216 19:56:34.546412 103685 kubeadm.go:582] duration metric: took 3.721819048s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1216 19:56:34.546431 103685 node_conditions.go:102] verifying NodePressure condition ...
I1216 19:56:34.729743 103685 request.go:632] Waited for 183.181609ms due to client-side throttling, not priority and fairness, request: GET:https://192.168.49.2:8443/api/v1/nodes
I1216 19:56:34.729803 103685 round_trippers.go:463] GET https://192.168.49.2:8443/api/v1/nodes
I1216 19:56:34.729812 103685 round_trippers.go:469] Request Headers:
I1216 19:56:34.729854 103685 round_trippers.go:473] Accept: application/json, */*
I1216 19:56:34.729861 103685 round_trippers.go:473] User-Agent: minikube-linux-arm64/v0.0.0 (linux/arm64) kubernetes/$Format
I1216 19:56:34.737711 103685 round_trippers.go:574] Response Status: 200 OK in 7 milliseconds
I1216 19:56:34.739565 103685 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1216 19:56:34.739591 103685 node_conditions.go:123] node cpu capacity is 2
I1216 19:56:34.739603 103685 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1216 19:56:34.739607 103685 node_conditions.go:123] node cpu capacity is 2
I1216 19:56:34.739612 103685 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1216 19:56:34.739616 103685 node_conditions.go:123] node cpu capacity is 2
I1216 19:56:34.739621 103685 node_conditions.go:105] duration metric: took 193.18517ms to run NodePressure ...
I1216 19:56:34.739633 103685 start.go:241] waiting for startup goroutines ...
I1216 19:56:34.739659 103685 start.go:255] writing updated cluster config ...
I1216 19:56:34.739983 103685 ssh_runner.go:195] Run: rm -f paused
I1216 19:56:34.807887 103685 start.go:600] kubectl: 1.32.0, cluster: 1.32.0 (minor skew: 0)
I1216 19:56:34.812506 103685 out.go:177] * Done! kubectl is now configured to use "ha-082404" cluster and "default" namespace by default
==> Docker <==
Dec 16 19:54:58 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:54:58Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-668d6bf9bc-9th4p_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"9128e494ac0e7020e201e9ac5f60e8e2183d7f1e544435b7726145dba865da7e\""
Dec 16 19:54:58 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:54:58Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-668d6bf9bc-9th4p_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"2bea468deff55fcec1295d54e34efbd26f13e5cbe963bf7e3b7c5c4e606cd6db\""
Dec 16 19:54:58 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:54:58Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-668d6bf9bc-mwl2r_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"30043c00e646a7738d6e6b290738a97385c910e4be7c7a7368f2e561ae255ed1\""
Dec 16 19:54:58 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:54:58Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-668d6bf9bc-mwl2r_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"adc0d3262bbd118238467f291bd8651bbbf2e033337dc9f63de6f42467df04a9\""
Dec 16 19:54:59 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:54:59Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"busybox-58667487b6-f7kww_default\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"104dc58f833120481c3c51f493e3ba3c65ddb41c211aa000892ca3213fc51173\""
Dec 16 19:54:59 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:54:59Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"busybox-58667487b6-f7kww_default\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"bed50a955af114bb190915894779d531b23cc725e97cdfe5c818496e9d1c6773\""
Dec 16 19:55:00 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:00Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"busybox-58667487b6-f7kww_default\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"104dc58f833120481c3c51f493e3ba3c65ddb41c211aa000892ca3213fc51173\""
Dec 16 19:55:00 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:00Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-668d6bf9bc-9th4p_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"9128e494ac0e7020e201e9ac5f60e8e2183d7f1e544435b7726145dba865da7e\""
Dec 16 19:55:00 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:00Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-668d6bf9bc-mwl2r_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"30043c00e646a7738d6e6b290738a97385c910e4be7c7a7368f2e561ae255ed1\""
Dec 16 19:55:04 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:04Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"0959a4005a36568ca29c3f55a632fea8af2d4c86250c1a77542833ba20528be2\". Proceed without further sandbox information."
Dec 16 19:55:04 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:04Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"b683c7cd1f99e265fbe6ea7f92bf4a0e7f6831104d4844d765c4fd18477ae8fb\". Proceed without further sandbox information."
Dec 16 19:55:05 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:05Z" level=info msg="Both sandbox container and checkpoint could not be found with id \"4de0630835e4c8f083af020fd98362560fd03c0b304ac1b981e903aac6038d75\". Proceed without further sandbox information."
Dec 16 19:55:05 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/e8fa66e19e5c7ae5346465d3c449f010408ba7d6ecc7061affd3258fcfd2e159/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:05 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/303e373747b455919bbf27dc34a3e802c07f35418d3aecf6727a960e9492a202/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options trust-ad ndots:0 edns0]"
Dec 16 19:55:05 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c802c6e18bab9dab186b7bdba4604f511326b7e6a42dec6b6ee13bbe10891cdb/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:05 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9c9be92fc010aef94352c80be37841d9105ba84c31eecd151c832c1dab2bb939/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:06 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:06Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/8082d07116f548d4e5565fdb6083dd0a9abe240f5905a8d22979ade61fbcda3b/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:21 ha-082404 dockerd[1061]: time="2024-12-16T19:55:21.455414133Z" level=info msg="ignoring event" container=83f8881a95f7fe902eeeace12861c5eada335956b6149c5a2e9b9fbb28a63be2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Dec 16 19:55:26 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:26Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}"
Dec 16 19:55:27 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a03474977bba9e8792837697582177debe4fb2f7a9e9c86390cd26f3eb41ca39/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:27 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0d96b49f74b86a805dcc2bfda49c0da56e579ecf8e81578ddc65d236eb470cf2/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:27 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/3826d0f3230e1a637323bfd3f4ea6c478c142137b6e3857703539d41567fc8c6/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:27 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5c876d42918e89f7a13d5a2c502a12f219a10bdb241b1d098457a6d766bb9335/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
Dec 16 19:55:27 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/27e8c579574dec5e7d0f21f871efaa6010a74a84e9074711d9b412aece8a377e/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local us-east-2.compute.internal options ndots:5]"
Dec 16 19:55:27 ha-082404 cri-dockerd[1371]: time="2024-12-16T19:55:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b135e0ba00451bea659f1cf1be88e708b928feb21010a004a2a660e8dbb8e716/resolv.conf as [nameserver 192.168.49.1 search us-east-2.compute.internal options edns0 trust-ad ndots:0]"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
893c11024e002 2f6c962e7b831 27 seconds ago Running coredns 4 b135e0ba00451 coredns-668d6bf9bc-9th4p
617f21ce9a32c 2f6c962e7b831 32 seconds ago Running coredns 4 3826d0f3230e1 coredns-668d6bf9bc-mwl2r
d5a5339e5c899 2be0bcf609c65 41 seconds ago Running kindnet-cni 2 a03474977bba9 kindnet-8nzqx
fdaa9da785d37 ba04bb24b9575 42 seconds ago Running storage-provisioner 4 0d96b49f74b86 storage-provisioner
3ddf05914659e 89a35e2ebb6b9 43 seconds ago Running busybox 2 27e8c579574de busybox-58667487b6-f7kww
6d70bd91fd793 2f50386e20bfd 44 seconds ago Running kube-proxy 2 5c876d42918e8 kube-proxy-x7xbp
35b1fb0a1945d a8d049396f6b8 57 seconds ago Running kube-controller-manager 4 303e373747b45 kube-controller-manager-ha-082404
12cc9372bc39e 334f34d04b9fe About a minute ago Running kube-vip 1 8082d07116f54 kube-vip-ha-082404
d8ab2b1e58da5 c3ff26fb59f37 About a minute ago Running kube-scheduler 2 9c9be92fc010a kube-scheduler-ha-082404
3a57c691a62b7 7fc9d4aa817aa About a minute ago Running etcd 2 c802c6e18bab9 etcd-ha-082404
83f8881a95f7f a8d049396f6b8 About a minute ago Exited kube-controller-manager 3 303e373747b45 kube-controller-manager-ha-082404
938f2f755d1e1 2b5bd0f16085a About a minute ago Running kube-apiserver 2 e8fa66e19e5c7 kube-apiserver-ha-082404
96368e60b6cd3 ba04bb24b9575 2 minutes ago Exited storage-provisioner 3 eec313ef0a43a storage-provisioner
7c8e374b59119 2f50386e20bfd 4 minutes ago Exited kube-proxy 1 740286e2648c8 kube-proxy-x7xbp
fb3fa2313cf97 89a35e2ebb6b9 4 minutes ago Exited busybox 1 104dc58f83312 busybox-58667487b6-f7kww
6a5762f475692 2f6c962e7b831 4 minutes ago Exited coredns 3 30043c00e646a coredns-668d6bf9bc-mwl2r
6210fc1a4717d 2f6c962e7b831 4 minutes ago Exited coredns 3 9128e494ac0e7 coredns-668d6bf9bc-9th4p
1eb8182139986 2be0bcf609c65 4 minutes ago Exited kindnet-cni 1 4f306fc870453 kindnet-8nzqx
446adec279b35 c3ff26fb59f37 5 minutes ago Exited kube-scheduler 1 79184126536be kube-scheduler-ha-082404
67b42087d2405 334f34d04b9fe 5 minutes ago Exited kube-vip 0 21fa012c971bd kube-vip-ha-082404
8396fdc657769 2b5bd0f16085a 5 minutes ago Exited kube-apiserver 1 b9be5e90858c5 kube-apiserver-ha-082404
cebe98bc67ce9 7fc9d4aa817aa 6 minutes ago Exited etcd 1 5f897aff3753b etcd-ha-082404
==> coredns [617f21ce9a32] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.11.3
linux/arm64, go1.21.11, a6338e9
[INFO] 127.0.0.1:33624 - 23686 "HINFO IN 5946540837965325478.1939124093402533604. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.023049173s
==> coredns [6210fc1a4717] <==
linux/arm64, go1.21.11, a6338e9
[INFO] 127.0.0.1:46502 - 35570 "HINFO IN 5699985272064913748.8046935519882549673. udp 57 false 512" - - 0 6.001566228s
[ERROR] plugin/errors: 2 5699985272064913748.8046935519882549673. HINFO: read udp 10.244.0.3:57093->192.168.49.1:53: i/o timeout
[INFO] 127.0.0.1:48527 - 14267 "HINFO IN 5699985272064913748.8046935519882549673. udp 57 false 512" NXDOMAIN qr,rd,ra 57 4.030660446s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] 127.0.0.1:55742 - 9085 "HINFO IN 5699985272064913748.8046935519882549673. udp 57 false 512" NXDOMAIN qr,rd,ra 57 2.004828683s
[INFO] 127.0.0.1:53595 - 8649 "HINFO IN 5699985272064913748.8046935519882549673. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.002796423s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1187239052]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (16-Dec-2024 19:52:01.427) (total time: 30000ms):
Trace[1187239052]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30000ms (19:52:31.428)
Trace[1187239052]: [30.000725026s] [30.000725026s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[895270623]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (16-Dec-2024 19:52:01.427) (total time: 30001ms):
Trace[895270623]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30001ms (19:52:31.428)
Trace[895270623]: [30.001121303s] [30.001121303s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[1413824115]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (16-Dec-2024 19:52:01.427) (total time: 30006ms):
Trace[1413824115]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30006ms (19:52:31.434)
Trace[1413824115]: [30.006827404s] [30.006827404s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [6a5762f47569] <==
linux/arm64, go1.21.11, a6338e9
[INFO] 127.0.0.1:45746 - 10236 "HINFO IN 1388450221469688476.2952753411806229028. udp 57 false 512" - - 0 6.00172819s
[ERROR] plugin/errors: 2 1388450221469688476.2952753411806229028. HINFO: read udp 10.244.0.4:48599->192.168.49.1:53: i/o timeout
[INFO] 127.0.0.1:38502 - 11599 "HINFO IN 1388450221469688476.2952753411806229028. udp 57 false 512" NXDOMAIN qr,rd,ra 57 4.041385082s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] 127.0.0.1:43462 - 4711 "HINFO IN 1388450221469688476.2952753411806229028. udp 57 false 512" NXDOMAIN qr,rd,ra 57 2.005573289s
[INFO] 127.0.0.1:48789 - 38002 "HINFO IN 1388450221469688476.2952753411806229028. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.003795173s
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[656955583]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (16-Dec-2024 19:52:01.425) (total time: 30006ms):
Trace[656955583]: ---"Objects listed" error:Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30006ms (19:52:31.431)
Trace[656955583]: [30.006407337s] [30.006407337s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.EndpointSlice: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[407629884]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (16-Dec-2024 19:52:01.425) (total time: 30001ms):
Trace[407629884]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30001ms (19:52:31.426)
Trace[407629884]: [30.001502597s] [30.001502597s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Namespace: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] plugin/kubernetes: Trace[2005610125]: "Reflector ListAndWatch" name:pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229 (16-Dec-2024 19:52:01.424) (total time: 30004ms):
Trace[2005610125]: ---"Objects listed" error:Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout 30002ms (19:52:31.426)
Trace[2005610125]: [30.004141168s] [30.004141168s] END
[ERROR] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.29.3/tools/cache/reflector.go:229: Failed to watch *v1.Service: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout
[INFO] SIGTERM: Shutting down servers then terminating
[INFO] plugin/health: Going into lameduck mode for 5s
==> coredns [893c11024e00] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 9e2996f8cb67ac53e0259ab1f8d615d07d1beb0bd07e6a1e39769c3bf486a905bb991cc47f8d2f14d0d3a90a87dfc625a0b4c524fed169d8158c40657c0694b1
CoreDNS-1.11.3
linux/arm64, go1.21.11, a6338e9
[INFO] 127.0.0.1:55418 - 8032 "HINFO IN 2239110289631467918.6529411791828990824. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.012461126s
==> describe nodes <==
Name: ha-082404
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=ha-082404
kubernetes.io/os=linux
minikube.k8s.io/commit=74e51ab701402ddc00f8ba70f2a2775c7dcd6477
minikube.k8s.io/name=ha-082404
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_12_16T19_46_22_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 16 Dec 2024 19:46:18 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-082404
AcquireTime: <unset>
RenewTime: Mon, 16 Dec 2024 19:56:27 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 16 Dec 2024 19:55:26 +0000 Mon, 16 Dec 2024 19:46:18 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 16 Dec 2024 19:55:26 +0000 Mon, 16 Dec 2024 19:46:18 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 16 Dec 2024 19:55:26 +0000 Mon, 16 Dec 2024 19:46:18 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 16 Dec 2024 19:55:26 +0000 Mon, 16 Dec 2024 19:46:19 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: ha-082404
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 4b4782f958ee44199c9ef446eeb3cb98
System UUID: 5d4c52bd-6456-4b8b-b2a7-ff86570014a2
Boot ID: e1bb55ba-ca99-49c9-b685-77652a8efae1
Kernel Version: 5.15.0-1072-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://27.4.0
Kubelet Version: v1.32.0
Kube-Proxy Version: v1.32.0
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-58667487b6-f7kww 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m40s
kube-system coredns-668d6bf9bc-9th4p 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 10m
kube-system coredns-668d6bf9bc-mwl2r 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 10m
kube-system etcd-ha-082404 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 10m
kube-system kindnet-8nzqx 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 10m
kube-system kube-apiserver-ha-082404 250m (12%) 0 (0%) 0 (0%) 0 (0%) 10m
kube-system kube-controller-manager-ha-082404 200m (10%) 0 (0%) 0 (0%) 0 (0%) 10m
kube-system kube-proxy-x7xbp 0 (0%) 0 (0%) 0 (0%) 0 (0%) 10m
kube-system kube-scheduler-ha-082404 100m (5%) 0 (0%) 0 (0%) 0 (0%) 10m
kube-system kube-vip-ha-082404 0 (0%) 0 (0%) 0 (0%) 0 (0%) 5m7s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 10m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (47%) 100m (5%)
memory 290Mi (3%) 390Mi (4%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 4m24s kube-proxy
Normal Starting 43s kube-proxy
Normal Starting 10m kube-proxy
Warning CgroupV1 10m kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientPID 10m kubelet Node ha-082404 status is now: NodeHasSufficientPID
Normal NodeHasSufficientMemory 10m kubelet Node ha-082404 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 10m kubelet Node ha-082404 status is now: NodeHasNoDiskPressure
Normal NodeAllocatableEnforced 10m kubelet Updated Node Allocatable limit across pods
Normal Starting 10m kubelet Starting kubelet.
Normal RegisteredNode 10m node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal RegisteredNode 9m34s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal RegisteredNode 8m50s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal NodeHasSufficientPID 6m6s (x7 over 6m6s) kubelet Node ha-082404 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 6m6s kubelet Updated Node Allocatable limit across pods
Warning CgroupV1 6m6s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal Starting 6m6s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 6m6s (x8 over 6m6s) kubelet Node ha-082404 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 6m6s (x8 over 6m6s) kubelet Node ha-082404 status is now: NodeHasNoDiskPressure
Normal RegisteredNode 5m21s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal RegisteredNode 5m10s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal RegisteredNode 3m12s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal Starting 99s kubelet Starting kubelet.
Warning CgroupV1 99s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientMemory 99s (x8 over 99s) kubelet Node ha-082404 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 99s (x8 over 99s) kubelet Node ha-082404 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 99s (x7 over 99s) kubelet Node ha-082404 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 99s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 53s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Normal RegisteredNode 25s node-controller Node ha-082404 event: Registered Node ha-082404 in Controller
Name: ha-082404-m02
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=ha-082404-m02
kubernetes.io/os=linux
minikube.k8s.io/commit=74e51ab701402ddc00f8ba70f2a2775c7dcd6477
minikube.k8s.io/name=ha-082404
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2024_12_16T19_46_56_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 16 Dec 2024 19:46:50 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-082404-m02
AcquireTime: <unset>
RenewTime: Mon, 16 Dec 2024 19:56:30 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 16 Dec 2024 19:55:28 +0000 Mon, 16 Dec 2024 19:46:50 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 16 Dec 2024 19:55:28 +0000 Mon, 16 Dec 2024 19:46:50 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 16 Dec 2024 19:55:28 +0000 Mon, 16 Dec 2024 19:46:50 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 16 Dec 2024 19:55:28 +0000 Mon, 16 Dec 2024 19:46:51 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.3
Hostname: ha-082404-m02
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: f2a12161850a4ea2a451399d7ccade2d
System UUID: c3768833-08bd-45c4-8427-6f0c3f5b0998
Boot ID: e1bb55ba-ca99-49c9-b685-77652a8efae1
Kernel Version: 5.15.0-1072-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://27.4.0
Kubelet Version: v1.32.0
Kube-Proxy Version: v1.32.0
PodCIDR: 10.244.1.0/24
PodCIDRs: 10.244.1.0/24
Non-terminated Pods: (8 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-58667487b6-mdgdk 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m40s
kube-system etcd-ha-082404-m02 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 9m46s
kube-system kindnet-p6stw 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 9m47s
kube-system kube-apiserver-ha-082404-m02 250m (12%) 0 (0%) 0 (0%) 0 (0%) 9m46s
kube-system kube-controller-manager-ha-082404-m02 200m (10%) 0 (0%) 0 (0%) 0 (0%) 9m46s
kube-system kube-proxy-wmg6k 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9m47s
kube-system kube-scheduler-ha-082404-m02 100m (5%) 0 (0%) 0 (0%) 0 (0%) 9m46s
kube-system kube-vip-ha-082404-m02 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9m46s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (37%) 100m (5%)
memory 150Mi (1%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 52s kube-proxy
Normal Starting 4m47s kube-proxy
Normal Starting 6m50s kube-proxy
Normal Starting 9m37s kube-proxy
Normal NodeHasSufficientPID 9m48s (x7 over 9m48s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientPID
Normal NodeHasSufficientMemory 9m48s (x8 over 9m48s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 9m48s (x8 over 9m48s) kubelet Node ha-082404-m02 status is now: NodeHasNoDiskPressure
Normal RegisteredNode 9m43s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal RegisteredNode 9m34s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal RegisteredNode 8m50s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal NodeHasSufficientPID 7m26s (x7 over 7m26s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 7m26s kubelet Updated Node Allocatable limit across pods
Normal Starting 7m26s kubelet Starting kubelet.
Warning CgroupV1 7m26s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientMemory 7m26s (x8 over 7m26s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 7m26s (x8 over 7m26s) kubelet Node ha-082404-m02 status is now: NodeHasNoDiskPressure
Normal Starting 6m4s kubelet Starting kubelet.
Warning CgroupV1 6m4s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 6m4s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 6m3s (x8 over 6m4s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 6m3s (x8 over 6m4s) kubelet Node ha-082404-m02 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 6m3s (x7 over 6m4s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientPID
Normal RegisteredNode 5m21s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal RegisteredNode 5m10s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal RegisteredNode 3m12s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal Starting 96s kubelet Starting kubelet.
Warning CgroupV1 96s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientMemory 96s (x8 over 96s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 96s (x8 over 96s) kubelet Node ha-082404-m02 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 96s (x7 over 96s) kubelet Node ha-082404-m02 status is now: NodeHasSufficientPID
Normal NodeAllocatableEnforced 96s kubelet Updated Node Allocatable limit across pods
Normal RegisteredNode 53s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Normal RegisteredNode 25s node-controller Node ha-082404-m02 event: Registered Node ha-082404-m02 in Controller
Name: ha-082404-m04
Roles: <none>
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=ha-082404-m04
kubernetes.io/os=linux
minikube.k8s.io/commit=74e51ab701402ddc00f8ba70f2a2775c7dcd6477
minikube.k8s.io/name=ha-082404
minikube.k8s.io/primary=false
minikube.k8s.io/updated_at=2024_12_16T19_48_27_0700
minikube.k8s.io/version=v1.34.0
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 16 Dec 2024 19:48:26 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: ha-082404-m04
AcquireTime: <unset>
RenewTime: Mon, 16 Dec 2024 19:56:35 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 16 Dec 2024 19:56:36 +0000 Mon, 16 Dec 2024 19:56:36 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 16 Dec 2024 19:56:36 +0000 Mon, 16 Dec 2024 19:56:36 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 16 Dec 2024 19:56:36 +0000 Mon, 16 Dec 2024 19:56:36 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 16 Dec 2024 19:56:36 +0000 Mon, 16 Dec 2024 19:56:36 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.5
Hostname: ha-082404-m04
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 1f0b459793e944f5a162701add5f2897
System UUID: ec1dc162-320c-4a8b-904d-db619d30c85c
Boot ID: e1bb55ba-ca99-49c9-b685-77652a8efae1
Kernel Version: 5.15.0-1072-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://27.4.0
Kubelet Version: v1.32.0
Kube-Proxy Version: v1.32.0
PodCIDR: 10.244.3.0/24
PodCIDRs: 10.244.3.0/24
Non-terminated Pods: (3 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox-58667487b6-2bw6v 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m32s
kube-system kindnet-m64xz 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 8m11s
kube-system kube-proxy-pvlrj 0 (0%) 0 (0%) 0 (0%) 0 (0%) 8m11s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 100m (5%) 100m (5%)
memory 50Mi (0%) 50Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 8m8s kube-proxy
Normal Starting 2m35s kube-proxy
Normal NodeHasNoDiskPressure 8m11s (x2 over 8m11s) kubelet Node ha-082404-m04 status is now: NodeHasNoDiskPressure
Normal NodeAllocatableEnforced 8m11s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientPID 8m11s (x2 over 8m11s) kubelet Node ha-082404-m04 status is now: NodeHasSufficientPID
Warning CgroupV1 8m11s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientMemory 8m11s (x2 over 8m11s) kubelet Node ha-082404-m04 status is now: NodeHasSufficientMemory
Normal NodeReady 8m10s kubelet Node ha-082404-m04 status is now: NodeReady
Normal RegisteredNode 8m10s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal RegisteredNode 8m9s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal RegisteredNode 8m8s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal RegisteredNode 5m21s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal RegisteredNode 5m10s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal NodeNotReady 4m30s node-controller Node ha-082404-m04 status is now: NodeNotReady
Normal RegisteredNode 3m12s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal Starting 2m56s kubelet Starting kubelet.
Normal NodeAllocatableEnforced 2m55s kubelet Updated Node Allocatable limit across pods
Warning CgroupV1 2m55s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeHasSufficientPID 2m49s (x7 over 2m55s) kubelet Node ha-082404-m04 status is now: NodeHasSufficientPID
Normal NodeHasSufficientMemory 2m43s (x8 over 2m55s) kubelet Node ha-082404-m04 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m43s (x8 over 2m55s) kubelet Node ha-082404-m04 status is now: NodeHasNoDiskPressure
Normal RegisteredNode 53s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal RegisteredNode 25s node-controller Node ha-082404-m04 event: Registered Node ha-082404-m04 in Controller
Normal Starting 14s kubelet Starting kubelet.
Warning CgroupV1 14s kubelet cgroup v1 support is in maintenance mode, please migrate to cgroup v2
Normal NodeAllocatableEnforced 14s kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientPID 8s (x7 over 14s) kubelet Node ha-082404-m04 status is now: NodeHasSufficientPID
Normal NodeNotReady 3s node-controller Node ha-082404-m04 status is now: NodeNotReady
Normal NodeHasSufficientMemory 1s (x8 over 14s) kubelet Node ha-082404-m04 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 1s (x8 over 14s) kubelet Node ha-082404-m04 status is now: NodeHasNoDiskPressure
==> dmesg <==
[Dec16 19:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014827] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.455673] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.026726] systemd[1]: /lib/systemd/system/snapd.service:23: Unknown key name 'RestartMode' in section 'Service', ignoring.
[ +0.031497] systemd[1]: /lib/systemd/system/cloud-init.service:20: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.017044] systemd[1]: /lib/systemd/system/cloud-init-hotplugd.socket:11: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.631590] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.594930] kauditd_printk_skb: 36 callbacks suppressed
==> etcd [3a57c691a62b] <==
{"level":"warn","ts":"2024-12-16T19:55:24.499623Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.576775Z","time spent":"922.836807ms","remote":"127.0.0.1:48584","response type":"/etcdserverpb.KV/Range","request count":0,"request size":57,"response count":0,"response size":29,"request content":"key:\"/registry/ingressclasses/\" range_end:\"/registry/ingressclasses0\" limit:500 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430325Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"927.297414ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/rolebindings/\" range_end:\"/registry/rolebindings0\" limit:500 ","response":"range_response_count:12 size:8695"}
{"level":"info","ts":"2024-12-16T19:55:24.499887Z","caller":"traceutil/trace.go:171","msg":"trace[1713466161] range","detail":"{range_begin:/registry/rolebindings/; range_end:/registry/rolebindings0; response_count:12; response_revision:2559; }","duration":"996.850972ms","start":"2024-12-16T19:55:23.503024Z","end":"2024-12-16T19:55:24.499875Z","steps":["trace[1713466161] 'agreement among raft nodes before linearized reading' (duration: 927.252099ms)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.499957Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.502987Z","time spent":"996.937493ms","remote":"127.0.0.1:48604","response type":"/etcdserverpb.KV/Range","request count":0,"request size":53,"response count":12,"response size":8719,"request content":"key:\"/registry/rolebindings/\" range_end:\"/registry/rolebindings0\" limit:500 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430348Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"938.925208ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/csidrivers/\" range_end:\"/registry/csidrivers0\" limit:500 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-12-16T19:55:24.500194Z","caller":"traceutil/trace.go:171","msg":"trace[1043412968] range","detail":"{range_begin:/registry/csidrivers/; range_end:/registry/csidrivers0; response_count:0; response_revision:2559; }","duration":"1.008763893s","start":"2024-12-16T19:55:23.491419Z","end":"2024-12-16T19:55:24.500183Z","steps":["trace[1043412968] 'agreement among raft nodes before linearized reading' (duration: 938.91432ms)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.500240Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.491384Z","time spent":"1.008845368s","remote":"127.0.0.1:48660","response type":"/etcdserverpb.KV/Range","request count":0,"request size":49,"response count":0,"response size":29,"request content":"key:\"/registry/csidrivers/\" range_end:\"/registry/csidrivers0\" limit:500 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430366Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"982.134204ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/validatingadmissionpolicies/\" range_end:\"/registry/validatingadmissionpolicies0\" limit:500 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-12-16T19:55:24.525276Z","caller":"traceutil/trace.go:171","msg":"trace[1677327849] range","detail":"{range_begin:/registry/validatingadmissionpolicies/; range_end:/registry/validatingadmissionpolicies0; response_count:0; response_revision:2559; }","duration":"1.077034411s","start":"2024-12-16T19:55:23.448228Z","end":"2024-12-16T19:55:24.525262Z","steps":["trace[1677327849] 'agreement among raft nodes before linearized reading' (duration: 982.127697ms)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.525349Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.448197Z","time spent":"1.077128603s","remote":"127.0.0.1:48760","response type":"/etcdserverpb.KV/Range","request count":0,"request size":83,"response count":0,"response size":29,"request content":"key:\"/registry/validatingadmissionpolicies/\" range_end:\"/registry/validatingadmissionpolicies0\" limit:500 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430413Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"992.220177ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/secrets/\" range_end:\"/registry/secrets0\" limit:500 ","response":"range_response_count:4 size:2585"}
{"level":"info","ts":"2024-12-16T19:55:24.525509Z","caller":"traceutil/trace.go:171","msg":"trace[1080619265] range","detail":"{range_begin:/registry/secrets/; range_end:/registry/secrets0; response_count:4; response_revision:2559; }","duration":"1.087313494s","start":"2024-12-16T19:55:23.438188Z","end":"2024-12-16T19:55:24.525502Z","steps":["trace[1080619265] 'agreement among raft nodes before linearized reading' (duration: 992.185478ms)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.525535Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.438159Z","time spent":"1.087366022s","remote":"127.0.0.1:48336","response type":"/etcdserverpb.KV/Range","request count":0,"request size":43,"response count":4,"response size":2609,"request content":"key:\"/registry/secrets/\" range_end:\"/registry/secrets0\" limit:500 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430450Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.188462634s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/priorityclasses/system-node-critical\" limit:1 ","response":"range_response_count:1 size:442"}
{"level":"info","ts":"2024-12-16T19:55:24.525743Z","caller":"traceutil/trace.go:171","msg":"trace[1357142503] range","detail":"{range_begin:/registry/priorityclasses/system-node-critical; range_end:; response_count:1; response_revision:2559; }","duration":"1.283751375s","start":"2024-12-16T19:55:23.241984Z","end":"2024-12-16T19:55:24.525735Z","steps":["trace[1357142503] 'agreement among raft nodes before linearized reading' (duration: 1.188438454s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.525770Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.241970Z","time spent":"1.283790209s","remote":"127.0.0.1:48628","response type":"/etcdserverpb.KV/Range","request count":0,"request size":50,"response count":1,"response size":466,"request content":"key:\"/registry/priorityclasses/system-node-critical\" limit:1 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430468Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.188615679s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/clusterroles\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-12-16T19:55:24.529601Z","caller":"traceutil/trace.go:171","msg":"trace[1658249732] range","detail":"{range_begin:/registry/clusterroles; range_end:; response_count:0; response_revision:2559; }","duration":"1.287735747s","start":"2024-12-16T19:55:23.241849Z","end":"2024-12-16T19:55:24.529585Z","steps":["trace[1658249732] 'agreement among raft nodes before linearized reading' (duration: 1.188608656s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.533718Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:23.241800Z","time spent":"1.29188782s","remote":"127.0.0.1:48608","response type":"/etcdserverpb.KV/Range","request count":0,"request size":26,"response count":0,"response size":29,"request content":"key:\"/registry/clusterroles\" limit:1 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430502Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.616093415s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/leases/kube-system/apiserver-lhvlhw2affhkalxzqnql7m47si\" limit:1 ","response":"range_response_count:1 size:688"}
{"level":"info","ts":"2024-12-16T19:55:24.534154Z","caller":"traceutil/trace.go:171","msg":"trace[1358004048] range","detail":"{range_begin:/registry/leases/kube-system/apiserver-lhvlhw2affhkalxzqnql7m47si; range_end:; response_count:1; response_revision:2559; }","duration":"1.719733938s","start":"2024-12-16T19:55:22.814405Z","end":"2024-12-16T19:55:24.534138Z","steps":["trace[1358004048] 'agreement among raft nodes before linearized reading' (duration: 1.616070228s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.534188Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:22.814375Z","time spent":"1.719799659s","remote":"127.0.0.1:48542","response type":"/etcdserverpb.KV/Range","request count":0,"request size":69,"response count":1,"response size":712,"request content":"key:\"/registry/leases/kube-system/apiserver-lhvlhw2affhkalxzqnql7m47si\" limit:1 "}
{"level":"warn","ts":"2024-12-16T19:55:24.430537Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.809944161s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/ranges/serviceips\" limit:1 ","response":"range_response_count:1 size:116"}
{"level":"info","ts":"2024-12-16T19:55:24.534380Z","caller":"traceutil/trace.go:171","msg":"trace[1569502169] range","detail":"{range_begin:/registry/ranges/serviceips; range_end:; response_count:1; response_revision:2559; }","duration":"1.913780362s","start":"2024-12-16T19:55:22.620589Z","end":"2024-12-16T19:55:24.534369Z","steps":["trace[1569502169] 'agreement among raft nodes before linearized reading' (duration: 1.809921088s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:55:24.534405Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:55:22.620577Z","time spent":"1.913819212s","remote":"127.0.0.1:48272","response type":"/etcdserverpb.KV/Range","request count":0,"request size":31,"response count":1,"response size":140,"request content":"key:\"/registry/ranges/serviceips\" limit:1 "}
==> etcd [cebe98bc67ce] <==
{"level":"warn","ts":"2024-12-16T19:54:39.405253Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.109558006s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/validatingwebhookconfigurations/\" range_end:\"/registry/validatingwebhookconfigurations0\" count_only:true ","response":"","error":"context canceled"}
{"level":"info","ts":"2024-12-16T19:54:39.405266Z","caller":"traceutil/trace.go:171","msg":"trace[1483849555] range","detail":"{range_begin:/registry/validatingwebhookconfigurations/; range_end:/registry/validatingwebhookconfigurations0; }","duration":"1.109571314s","start":"2024-12-16T19:54:38.295689Z","end":"2024-12-16T19:54:39.405261Z","steps":["trace[1483849555] 'agreement among raft nodes before linearized reading' (duration: 1.10911642s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:54:39.405285Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:54:38.295653Z","time spent":"1.109621553s","remote":"127.0.0.1:52278","response type":"/etcdserverpb.KV/Range","request count":0,"request size":90,"response count":0,"response size":0,"request content":"key:\"/registry/validatingwebhookconfigurations/\" range_end:\"/registry/validatingwebhookconfigurations0\" count_only:true "}
2024/12/16 19:54:39 WARNING: [core] [Server #6] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
{"level":"warn","ts":"2024-12-16T19:54:39.405317Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.807622518s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/validatingadmissionpolicies/\" range_end:\"/registry/validatingadmissionpolicies0\" count_only:true ","response":"","error":"context canceled"}
{"level":"info","ts":"2024-12-16T19:54:39.405337Z","caller":"traceutil/trace.go:171","msg":"trace[524508747] range","detail":"{range_begin:/registry/validatingadmissionpolicies/; range_end:/registry/validatingadmissionpolicies0; }","duration":"7.807642915s","start":"2024-12-16T19:54:31.597689Z","end":"2024-12-16T19:54:39.405332Z","steps":["trace[524508747] 'agreement among raft nodes before linearized reading' (duration: 7.807124244s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:54:39.405351Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:54:31.597637Z","time spent":"7.807709858s","remote":"127.0.0.1:52308","response type":"/etcdserverpb.KV/Range","request count":0,"request size":82,"response count":0,"response size":0,"request content":"key:\"/registry/validatingadmissionpolicies/\" range_end:\"/registry/validatingadmissionpolicies0\" count_only:true "}
2024/12/16 19:54:39 WARNING: [core] [Server #6] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
{"level":"warn","ts":"2024-12-16T19:54:39.405385Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"9.610930852s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/cronjobs/\" range_end:\"/registry/cronjobs0\" count_only:true ","response":"","error":"context canceled"}
{"level":"info","ts":"2024-12-16T19:54:39.405406Z","caller":"traceutil/trace.go:171","msg":"trace[1517117004] range","detail":"{range_begin:/registry/cronjobs/; range_end:/registry/cronjobs0; }","duration":"9.610944734s","start":"2024-12-16T19:54:29.794449Z","end":"2024-12-16T19:54:39.405394Z","steps":["trace[1517117004] 'agreement among raft nodes before linearized reading' (duration: 9.610371501s)"],"step_count":1}
{"level":"warn","ts":"2024-12-16T19:54:39.405420Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-16T19:54:29.794394Z","time spent":"9.611021302s","remote":"127.0.0.1:51990","response type":"/etcdserverpb.KV/Range","request count":0,"request size":44,"response count":0,"response size":0,"request content":"key:\"/registry/cronjobs/\" range_end:\"/registry/cronjobs0\" count_only:true "}
2024/12/16 19:54:39 WARNING: [core] [Server #6] grpc: Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"
{"level":"warn","ts":"2024-12-16T19:54:39.452176Z","caller":"embed/serve.go:212","msg":"stopping secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
{"level":"warn","ts":"2024-12-16T19:54:39.452224Z","caller":"embed/serve.go:214","msg":"stopped secure grpc server due to error","error":"accept tcp 192.168.49.2:2379: use of closed network connection"}
{"level":"info","ts":"2024-12-16T19:54:39.452275Z","caller":"etcdserver/server.go:1534","msg":"skipped leadership transfer; local server is not leader","local-member-id":"aec36adc501070cc","current-leader-member-id":"0"}
{"level":"info","ts":"2024-12-16T19:54:39.452412Z","caller":"rafthttp/peer.go:330","msg":"stopping remote peer","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.452427Z","caller":"rafthttp/stream.go:294","msg":"stopped TCP streaming connection with remote peer","stream-writer-type":"stream MsgApp v2","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.452447Z","caller":"rafthttp/stream.go:294","msg":"stopped TCP streaming connection with remote peer","stream-writer-type":"stream Message","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.452473Z","caller":"rafthttp/pipeline.go:85","msg":"stopped HTTP pipelining with remote peer","local-member-id":"aec36adc501070cc","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.452525Z","caller":"rafthttp/stream.go:442","msg":"stopped stream reader with remote peer","stream-reader-type":"stream MsgApp v2","local-member-id":"aec36adc501070cc","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.452562Z","caller":"rafthttp/stream.go:442","msg":"stopped stream reader with remote peer","stream-reader-type":"stream Message","local-member-id":"aec36adc501070cc","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.452572Z","caller":"rafthttp/peer.go:335","msg":"stopped remote peer","remote-peer-id":"e74003b2f6d37ab0"}
{"level":"info","ts":"2024-12-16T19:54:39.456203Z","caller":"embed/etcd.go:582","msg":"stopping serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-12-16T19:54:39.456347Z","caller":"embed/etcd.go:587","msg":"stopped serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-12-16T19:54:39.456375Z","caller":"embed/etcd.go:380","msg":"closed etcd server","name":"ha-082404","data-dir":"/var/lib/minikube/etcd","advertise-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"]}
==> kernel <==
19:56:37 up 39 min, 0 users, load average: 2.31, 2.89, 2.41
Linux ha-082404 5.15.0-1072-aws #78~20.04.1-Ubuntu SMP Wed Oct 9 15:29:54 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [1eb818213998] <==
I1216 19:54:08.919128 1 main.go:301] handling current node
I1216 19:54:08.919147 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:54:08.919181 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:54:08.919390 1 main.go:297] Handling node with IPs: map[192.168.49.4:{}]
I1216 19:54:08.919412 1 main.go:324] Node ha-082404-m03 has CIDR [10.244.2.0/24]
I1216 19:54:08.919596 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:54:08.919616 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:54:18.921207 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:54:18.921307 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:54:18.921519 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:54:18.921538 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:54:18.921708 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1216 19:54:18.921724 1 main.go:301] handling current node
I1216 19:54:28.919262 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:54:28.919299 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:54:28.919483 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:54:28.919494 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:54:28.919563 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1216 19:54:28.919570 1 main.go:301] handling current node
I1216 19:54:38.926805 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1216 19:54:38.926872 1 main.go:301] handling current node
I1216 19:54:38.926894 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:54:38.926901 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:54:38.934582 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:54:38.934621 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
==> kindnet [d5a5339e5c89] <==
I1216 19:56:07.518650 1 main.go:301] handling current node
I1216 19:56:07.522146 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:56:07.522182 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:56:07.522340 1 routes.go:62] Adding route {Ifindex: 0 Dst: 10.244.1.0/24 Src: <nil> Gw: 192.168.49.3 Flags: [] Table: 0 Realm: 0}
I1216 19:56:07.522413 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:56:07.522421 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:56:07.522472 1 routes.go:62] Adding route {Ifindex: 0 Dst: 10.244.3.0/24 Src: <nil> Gw: 192.168.49.5 Flags: [] Table: 0 Realm: 0}
I1216 19:56:17.518554 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1216 19:56:17.518590 1 main.go:301] handling current node
I1216 19:56:17.518607 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:56:17.518614 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:56:17.518906 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:56:17.518923 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:56:27.519682 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:56:27.519937 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
I1216 19:56:27.520207 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:56:27.520245 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:56:27.520360 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1216 19:56:27.520394 1 main.go:301] handling current node
I1216 19:56:37.518984 1 main.go:297] Handling node with IPs: map[192.168.49.5:{}]
I1216 19:56:37.519018 1 main.go:324] Node ha-082404-m04 has CIDR [10.244.3.0/24]
I1216 19:56:37.519293 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1216 19:56:37.519312 1 main.go:301] handling current node
I1216 19:56:37.519325 1 main.go:297] Handling node with IPs: map[192.168.49.3:{}]
I1216 19:56:37.519332 1 main.go:324] Node ha-082404-m02 has CIDR [10.244.1.0/24]
==> kube-apiserver [8396fdc65776] <==
W1216 19:54:48.592065 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.601651 1 logging.go:55] [core] [Channel #112 SubChannel #113]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.626584 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.630262 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.744659 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.759050 1 logging.go:55] [core] [Channel #109 SubChannel #110]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.786973 1 logging.go:55] [core] [Channel #22 SubChannel #23]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.792435 1 logging.go:55] [core] [Channel #40 SubChannel #41]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.813269 1 logging.go:55] [core] [Channel #121 SubChannel #122]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.844353 1 logging.go:55] [core] [Channel #106 SubChannel #107]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.849794 1 logging.go:55] [core] [Channel #61 SubChannel #62]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.897503 1 logging.go:55] [core] [Channel #172 SubChannel #173]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.916387 1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.925840 1 logging.go:55] [core] [Channel #34 SubChannel #35]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:48.954290 1 logging.go:55] [core] [Channel #154 SubChannel #155]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.017300 1 logging.go:55] [core] [Channel #10 SubChannel #11]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.089791 1 logging.go:55] [core] [Channel #70 SubChannel #71]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.176605 1 logging.go:55] [core] [Channel #160 SubChannel #161]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.218401 1 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.296152 1 logging.go:55] [core] [Channel #124 SubChannel #125]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.323988 1 logging.go:55] [core] [Channel #17 SubChannel #18]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.377521 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.456849 1 logging.go:55] [core] [Channel #37 SubChannel #38]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.461326 1 logging.go:55] [core] [Channel #133 SubChannel #134]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
W1216 19:54:49.468892 1 logging.go:55] [core] [Channel #85 SubChannel #86]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", }. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: connect: connection refused"
==> kube-apiserver [938f2f755d1e] <==
E1216 19:55:24.385229 1 cacher.go:478] cacher (resourcequotas): unexpected ListAndWatch error: failed to list *core.ResourceQuota: etcdserver: leader changed; reinitializing...
E1216 19:55:24.385124 1 watcher.go:342] watch chan error: etcdserver: no leader
I1216 19:55:24.451119 1 cache.go:39] Caches are synced for LocalAvailability controller
I1216 19:55:24.452147 1 cache.go:39] Caches are synced for RemoteAvailability controller
I1216 19:55:24.452308 1 apf_controller.go:382] Running API Priority and Fairness config worker
I1216 19:55:24.452391 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process
I1216 19:55:24.483230 1 shared_informer.go:320] Caches are synced for cluster_authentication_trust_controller
I1216 19:55:24.489578 1 cache.go:39] Caches are synced for autoregister controller
I1216 19:55:24.541209 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller
I1216 19:55:24.561152 1 shared_informer.go:320] Caches are synced for *generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]
I1216 19:55:24.561376 1 policy_source.go:240] refreshing policies
I1216 19:55:24.564516 1 handler_discovery.go:451] Starting ResourceDiscoveryManager
I1216 19:55:24.595425 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist.
W1216 19:55:24.619617 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.3]
I1216 19:55:24.621787 1 shared_informer.go:320] Caches are synced for node_authorizer
I1216 19:55:24.621930 1 controller.go:615] quota admission added evaluator for: endpoints
I1216 19:55:24.646764 1 controller.go:615] quota admission added evaluator for: leases.coordination.k8s.io
I1216 19:55:24.703660 1 controller.go:615] quota admission added evaluator for: endpointslices.discovery.k8s.io
E1216 19:55:24.732216 1 controller.go:95] Found stale data, removed previous endpoints on kubernetes service, apiserver didn't exit successfully previously
I1216 19:55:24.849533 1 shared_informer.go:320] Caches are synced for configmaps
W1216 19:55:25.347548 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.49.2 192.168.49.3]
I1216 19:55:26.792490 1 controller.go:615] quota admission added evaluator for: serviceaccounts
I1216 19:55:45.170646 1 controller.go:615] quota admission added evaluator for: daemonsets.apps
I1216 19:55:45.281611 1 controller.go:615] quota admission added evaluator for: replicasets.apps
I1216 19:55:45.363145 1 controller.go:615] quota admission added evaluator for: deployments.apps
==> kube-controller-manager [35b1fb0a1945] <==
E1216 19:56:24.912210 1 gc_controller.go:151] "Failed to get node" err="node \"ha-082404-m03\" not found" logger="pod-garbage-collector-controller" node="ha-082404-m03"
E1216 19:56:24.912219 1 gc_controller.go:151] "Failed to get node" err="node \"ha-082404-m03\" not found" logger="pod-garbage-collector-controller" node="ha-082404-m03"
E1216 19:56:24.912231 1 gc_controller.go:151] "Failed to get node" err="node \"ha-082404-m03\" not found" logger="pod-garbage-collector-controller" node="ha-082404-m03"
E1216 19:56:24.912239 1 gc_controller.go:151] "Failed to get node" err="node \"ha-082404-m03\" not found" logger="pod-garbage-collector-controller" node="ha-082404-m03"
I1216 19:56:24.926811 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/kube-controller-manager-ha-082404-m03"
I1216 19:56:24.969401 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/kube-controller-manager-ha-082404-m03"
I1216 19:56:24.969432 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/etcd-ha-082404-m03"
I1216 19:56:25.029367 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/etcd-ha-082404-m03"
I1216 19:56:25.029624 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/kindnet-f7n6r"
I1216 19:56:25.067323 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/kindnet-f7n6r"
I1216 19:56:25.067573 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/kube-vip-ha-082404-m03"
I1216 19:56:25.102062 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/kube-vip-ha-082404-m03"
I1216 19:56:25.102092 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/kube-scheduler-ha-082404-m03"
I1216 19:56:25.158546 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/kube-scheduler-ha-082404-m03"
I1216 19:56:25.158579 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/kube-proxy-kr525"
I1216 19:56:25.204077 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/kube-proxy-kr525"
I1216 19:56:25.204305 1 gc_controller.go:342] "PodGC is force deleting Pod" logger="pod-garbage-collector-controller" pod="kube-system/kube-apiserver-ha-082404-m03"
I1216 19:56:25.261866 1 gc_controller.go:258] "Forced deletion of orphaned Pod succeeded" logger="pod-garbage-collector-controller" pod="kube-system/kube-apiserver-ha-082404-m03"
I1216 19:56:34.912839 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="ha-082404-m04"
I1216 19:56:34.944221 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="ha-082404-m04"
I1216 19:56:34.994389 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/busybox-58667487b6" duration="36.613604ms"
I1216 19:56:34.994508 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/busybox-58667487b6" duration="83.682µs"
I1216 19:56:36.148753 1 topologycache.go:237] "Can't get CPU or zone information for node" logger="endpointslice-controller" node="ha-082404-m04"
I1216 19:56:36.148818 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="ha-082404-m04"
I1216 19:56:36.161548 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="ha-082404-m04"
==> kube-controller-manager [83f8881a95f7] <==
I1216 19:55:08.421394 1 serving.go:386] Generated self-signed cert in-memory
I1216 19:55:10.376435 1 controllermanager.go:185] "Starting" version="v1.32.0"
I1216 19:55:10.376698 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1216 19:55:10.382278 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt"
I1216 19:55:10.382618 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt"
I1216 19:55:10.383341 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257
I1216 19:55:10.383506 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
E1216 19:55:21.425586 1 controllermanager.go:230] "Error building controller context" err="failed to wait for apiserver being healthy: timed out waiting for the condition: failed to get apiserver /healthz status: forbidden: User \"system:kube-controller-manager\" cannot get path \"/healthz\""
==> kube-proxy [6d70bd91fd79] <==
I1216 19:55:53.916535 1 server_linux.go:66] "Using iptables proxy"
I1216 19:55:54.007982 1 server.go:698] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E1216 19:55:54.008076 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1216 19:55:54.032062 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1216 19:55:54.032273 1 server_linux.go:170] "Using iptables Proxier"
I1216 19:55:54.034365 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1216 19:55:54.034931 1 server.go:497] "Version info" version="v1.32.0"
I1216 19:55:54.034960 1 server.go:499] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1216 19:55:54.040067 1 config.go:329] "Starting node config controller"
I1216 19:55:54.040092 1 shared_informer.go:313] Waiting for caches to sync for node config
I1216 19:55:54.040691 1 config.go:199] "Starting service config controller"
I1216 19:55:54.040714 1 shared_informer.go:313] Waiting for caches to sync for service config
I1216 19:55:54.040820 1 config.go:105] "Starting endpoint slice config controller"
I1216 19:55:54.040839 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I1216 19:55:54.140809 1 shared_informer.go:320] Caches are synced for service config
I1216 19:55:54.140961 1 shared_informer.go:320] Caches are synced for endpoint slice config
I1216 19:55:54.141106 1 shared_informer.go:320] Caches are synced for node config
==> kube-proxy [7c8e374b5911] <==
I1216 19:52:12.330957 1 server_linux.go:66] "Using iptables proxy"
I1216 19:52:12.437917 1 server.go:698] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E1216 19:52:12.438065 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1216 19:52:12.457169 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1216 19:52:12.457233 1 server_linux.go:170] "Using iptables Proxier"
I1216 19:52:12.459160 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1216 19:52:12.459581 1 server.go:497] "Version info" version="v1.32.0"
I1216 19:52:12.459607 1 server.go:499] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1216 19:52:12.468636 1 config.go:199] "Starting service config controller"
I1216 19:52:12.468666 1 shared_informer.go:313] Waiting for caches to sync for service config
I1216 19:52:12.468690 1 config.go:105] "Starting endpoint slice config controller"
I1216 19:52:12.468695 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I1216 19:52:12.470281 1 config.go:329] "Starting node config controller"
I1216 19:52:12.472310 1 shared_informer.go:313] Waiting for caches to sync for node config
I1216 19:52:12.569322 1 shared_informer.go:320] Caches are synced for endpoint slice config
I1216 19:52:12.569326 1 shared_informer.go:320] Caches are synced for service config
I1216 19:52:12.572470 1 shared_informer.go:320] Caches are synced for node config
==> kube-scheduler [446adec279b3] <==
W1216 19:51:01.499231 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1216 19:51:01.499289 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:51:01.578208 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1216 19:51:01.578259 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1216 19:51:01.747435 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1216 19:51:01.747487 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:51:02.889902 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1216 19:51:02.890154 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:51:03.991404 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E1216 19:51:03.991445 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:51:04.061316 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1216 19:51:04.061357 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W1216 19:51:04.102087 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1216 19:51:04.102132 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1216 19:51:04.387747 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "volumeattachments" in API group "storage.k8s.io" at the cluster scope
E1216 19:51:04.387790 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.VolumeAttachment: failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
I1216 19:51:15.904476 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
E1216 19:54:05.861793 1 framework.go:1316] "Plugin Failed" err="Operation cannot be fulfilled on pods/binding \"busybox-58667487b6-2bw6v\": pod busybox-58667487b6-2bw6v is already assigned to node \"ha-082404-m04\"" plugin="DefaultBinder" pod="default/busybox-58667487b6-2bw6v" node="ha-082404-m04"
E1216 19:54:05.862453 1 schedule_one.go:359] "scheduler cache ForgetPod failed" err="pod 6790bfd4-9506-4ddc-87a9-15b3648efed0(default/busybox-58667487b6-2bw6v) wasn't assumed so cannot be forgotten" pod="default/busybox-58667487b6-2bw6v"
E1216 19:54:05.862584 1 schedule_one.go:1058] "Error scheduling pod; retrying" err="running Bind plugin \"DefaultBinder\": Operation cannot be fulfilled on pods/binding \"busybox-58667487b6-2bw6v\": pod busybox-58667487b6-2bw6v is already assigned to node \"ha-082404-m04\"" pod="default/busybox-58667487b6-2bw6v"
I1216 19:54:05.862696 1 schedule_one.go:1071] "Pod has been assigned to node. Abort adding it back to queue." pod="default/busybox-58667487b6-2bw6v" node="ha-082404-m04"
I1216 19:54:39.355592 1 secure_serving.go:258] Stopped listening on 127.0.0.1:10259
I1216 19:54:39.355633 1 tlsconfig.go:258] "Shutting down DynamicServingCertificateController"
I1216 19:54:39.355860 1 configmap_cafile_content.go:226] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
E1216 19:54:39.356634 1 run.go:72] "command failed" err="finished without leader elect"
==> kube-scheduler [d8ab2b1e58da] <==
W1216 19:55:20.341483 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1216 19:55:20.341701 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:55:20.453090 1 reflector.go:569] runtime/asm_arm64.s:1223: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1216 19:55:20.453301 1 reflector.go:166] "Unhandled Error" err="runtime/asm_arm64.s:1223: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W1216 19:55:20.606441 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1216 19:55:20.606539 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:55:20.662040 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1216 19:55:20.662214 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1216 19:55:20.692269 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1216 19:55:20.692480 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:55:20.729246 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1216 19:55:20.729379 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:55:21.043797 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1216 19:55:21.043965 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1216 19:55:21.249588 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1216 19:55:21.249733 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W1216 19:55:21.401310 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1216 19:55:21.401443 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1216 19:55:21.785339 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1216 19:55:21.785381 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:55:22.450063 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1216 19:55:22.450222 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1216 19:55:22.614569 1 reflector.go:569] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1216 19:55:22.614628 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
I1216 19:55:30.901803 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.732749 1550 kuberuntime_manager.go:1341] "Unhandled Error" err="container &Container{Name:coredns,Image:registry.k8s.io/coredns/coredns:v1.11.3,Command:[],Args:[-conf /etc/coredns/Corefile],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:dns,HostPort:0,ContainerPort:53,Protocol:UDP,HostIP:,},ContainerPort{Name:dns-tcp,HostPort:0,ContainerPort:53,Protocol:TCP,HostIP:,},ContainerPort{Name:metrics,HostPort:0,ContainerPort:9153,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{memory: {{178257920 0} {<nil>} 170Mi BinarySI},},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{73400320 0} {<nil>} 70Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-volume,ReadOnly:true,MountPath:/etc/coredns,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j77pt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:60,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:5,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 8181 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_BIND_SERVICE],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod coredns-668d6bf9bc-mwl2r_kube-system(84f8cad3-3121-4fae-83c0-9fe5c573d6d4): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Dec 16 19:55:41 ha-082404 kubelet[1550]: I1216 19:55:41.738426 1550 scope.go:117] "RemoveContainer" containerID="7c8e374b5911964b21d6497101b917d7f7444905fb8aca42d07a5d36a6f1c607"
Dec 16 19:55:41 ha-082404 kubelet[1550]: I1216 19:55:41.738850 1550 scope.go:117] "RemoveContainer" containerID="1eb818213998658e85b4556cdc08f8d088f053cdbc968204be4192e5796cb9e1"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.744474 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="kube-system/coredns-668d6bf9bc-mwl2r" podUID="84f8cad3-3121-4fae-83c0-9fe5c573d6d4"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.746500 1550 kuberuntime_manager.go:1341] "Unhandled Error" err="container &Container{Name:kube-proxy,Image:registry.k8s.io/kube-proxy:v1.32.0,Command:[/usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/config.conf --hostname-override=$(NODE_NAME)],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-proxy,ReadOnly:false,MountPath:/var/lib/kube-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xpc6c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-proxy-x7xbp_kube-system(ce0d4ca6-fbc9-4f2f-996d-5bd01b41a14f): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.750977 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="kube-system/kube-proxy-x7xbp" podUID="ce0d4ca6-fbc9-4f2f-996d-5bd01b41a14f"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.746597 1550 kuberuntime_manager.go:1341] "Unhandled Error" err="container &Container{Name:storage-provisioner,Image:gcr.io/k8s-minikube/storage-provisioner:v5,Command:[/storage-provisioner],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:tmp,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pmfqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod storage-provisioner_kube-system(3c0d0135-4746-4b03-9877-d30c5297116e): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.760817 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"storage-provisioner\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="kube-system/storage-provisioner" podUID="3c0d0135-4746-4b03-9877-d30c5297116e"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.772568 1550 kuberuntime_manager.go:1341] "Unhandled Error" err="container &Container{Name:kindnet-cni,Image:docker.io/kindest/kindnetd:v20241108-5c6d2daf,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:HOST_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.hostIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_SUBNET,Value:10.244.0.0/16,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{52428800 0} {<nil>} 50Mi BinarySI},},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{52428800 0} {<nil>} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-cfg,ReadOnly:false,MountPath:/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:xtables-lock,ReadOnly:false,MountPath:/run/xtables.lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lib-modules,ReadOnly:true,MountPath:/lib/modules,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gc5k7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_RAW NET_ADMIN],Drop:[],},Privileged:*false,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kindnet-8nzqx_kube-system(c062cfe1-2c57-4040-8d48-673a935f60f6): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Dec 16 19:55:41 ha-082404 kubelet[1550]: E1216 19:55:41.778058 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kindnet-cni\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="kube-system/kindnet-8nzqx" podUID="c062cfe1-2c57-4040-8d48-673a935f60f6"
Dec 16 19:55:43 ha-082404 kubelet[1550]: I1216 19:55:43.730445 1550 scope.go:117] "RemoveContainer" containerID="6210fc1a4717d690ac0ea2f282f72ccf2e2fd735a51b3dc5aa99de9648fb8d0c"
Dec 16 19:55:43 ha-082404 kubelet[1550]: E1216 19:55:43.732690 1550 kuberuntime_manager.go:1341] "Unhandled Error" err="container &Container{Name:coredns,Image:registry.k8s.io/coredns/coredns:v1.11.3,Command:[],Args:[-conf /etc/coredns/Corefile],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:dns,HostPort:0,ContainerPort:53,Protocol:UDP,HostIP:,},ContainerPort{Name:dns-tcp,HostPort:0,ContainerPort:53,Protocol:TCP,HostIP:,},ContainerPort{Name:metrics,HostPort:0,ContainerPort:9153,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{memory: {{178257920 0} {<nil>} 170Mi BinarySI},},Requests:ResourceList{cpu: {{100 -3} {<nil>} 100m DecimalSI},memory: {{73400320 0} {<nil>} 70Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-volume,ReadOnly:true,MountPath:/etc/coredns,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d2pns,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:60,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:5,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 8181 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_BIND_SERVICE],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod coredns-668d6bf9bc-9th4p_kube-system(56bab989-75df-426f-af86-73cef2741306): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Dec 16 19:55:43 ha-082404 kubelet[1550]: E1216 19:55:43.734142 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="kube-system/coredns-668d6bf9bc-9th4p" podUID="56bab989-75df-426f-af86-73cef2741306"
Dec 16 19:55:48 ha-082404 kubelet[1550]: E1216 19:55:48.944030 1550 summary_sys_containers.go:51] "Failed to get system container stats" err="failed to get cgroup stats for \"/kubepods\": failed to get container info for \"/kubepods\": unknown container \"/kubepods\"" containerName="/kubepods"
Dec 16 19:55:48 ha-082404 kubelet[1550]: E1216 19:55:48.944090 1550 helpers.go:851] "Eviction manager: failed to construct signal" err="system container \"pods\" not found in metrics" signal="allocatableMemory.available"
Dec 16 19:55:52 ha-082404 kubelet[1550]: I1216 19:55:52.729897 1550 scope.go:117] "RemoveContainer" containerID="6a5762f4756925da37a388b994da6d8386d3c97d40f775fc2107416eeda2fcf8"
Dec 16 19:55:52 ha-082404 kubelet[1550]: E1216 19:55:52.730106 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=coredns pod=coredns-668d6bf9bc-mwl2r_kube-system(84f8cad3-3121-4fae-83c0-9fe5c573d6d4)\"" pod="kube-system/coredns-668d6bf9bc-mwl2r" podUID="84f8cad3-3121-4fae-83c0-9fe5c573d6d4"
Dec 16 19:55:53 ha-082404 kubelet[1550]: I1216 19:55:53.729872 1550 scope.go:117] "RemoveContainer" containerID="7c8e374b5911964b21d6497101b917d7f7444905fb8aca42d07a5d36a6f1c607"
Dec 16 19:55:54 ha-082404 kubelet[1550]: I1216 19:55:54.730523 1550 scope.go:117] "RemoveContainer" containerID="fb3fa2313cf97b14b6691ed06c9a4e06b659cbf82612c1aa2f5f293aae0521b5"
Dec 16 19:55:55 ha-082404 kubelet[1550]: I1216 19:55:55.729910 1550 scope.go:117] "RemoveContainer" containerID="96368e60b6cd3376c22e9babff0f7805b393bd16245039735963d803d363c107"
Dec 16 19:55:55 ha-082404 kubelet[1550]: I1216 19:55:55.730236 1550 scope.go:117] "RemoveContainer" containerID="6210fc1a4717d690ac0ea2f282f72ccf2e2fd735a51b3dc5aa99de9648fb8d0c"
Dec 16 19:55:55 ha-082404 kubelet[1550]: E1216 19:55:55.730588 1550 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"coredns\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=coredns pod=coredns-668d6bf9bc-9th4p_kube-system(56bab989-75df-426f-af86-73cef2741306)\"" pod="kube-system/coredns-668d6bf9bc-9th4p" podUID="56bab989-75df-426f-af86-73cef2741306"
Dec 16 19:55:56 ha-082404 kubelet[1550]: I1216 19:55:56.732954 1550 scope.go:117] "RemoveContainer" containerID="1eb818213998658e85b4556cdc08f8d088f053cdbc968204be4192e5796cb9e1"
Dec 16 19:56:05 ha-082404 kubelet[1550]: I1216 19:56:05.730162 1550 scope.go:117] "RemoveContainer" containerID="6a5762f4756925da37a388b994da6d8386d3c97d40f775fc2107416eeda2fcf8"
Dec 16 19:56:10 ha-082404 kubelet[1550]: I1216 19:56:10.732085 1550 scope.go:117] "RemoveContainer" containerID="6210fc1a4717d690ac0ea2f282f72ccf2e2fd735a51b3dc5aa99de9648fb8d0c"
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p ha-082404 -n ha-082404
helpers_test.go:261: (dbg) Run: kubectl --context ha-082404 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestMultiControlPlane/serial/RestartCluster FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestMultiControlPlane/serial/RestartCluster (109.02s)