=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:207: (dbg) Run: kubectl --context addons-248098 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:232: (dbg) Run: kubectl --context addons-248098 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:245: (dbg) Run: kubectl --context addons-248098 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [daa948b1-3e15-4f13-8b0d-ec8e9c2f7546] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [daa948b1-3e15-4f13-8b0d-ec8e9c2f7546] Running
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 9.004987725s
I1213 19:23:39.397954 602199 kapi.go:150] Service nginx in namespace default found.
addons_test.go:262: (dbg) Run: out/minikube-linux-arm64 -p addons-248098 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:262: (dbg) Non-zero exit: out/minikube-linux-arm64 -p addons-248098 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m10.235320918s)
** stderr **
ssh: Process exited with status 28
** /stderr **
addons_test.go:278: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
addons_test.go:286: (dbg) Run: kubectl --context addons-248098 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:291: (dbg) Run: out/minikube-linux-arm64 -p addons-248098 ip
addons_test.go:297: (dbg) Run: nslookup hello-john.test 192.168.49.2
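For reference, status 28 in the stderr above is curl's "operation timed out" exit code, i.e. the curl run inside the node timed out instead of getting a response or a refusal. A minimal sketch for reproducing the failing check by hand against this profile (assuming addons-248098 is still running; the -v and --max-time flags are debugging additions, the test itself only runs the plain curl shown at addons_test.go:262):
    kubectl --context addons-248098 -n ingress-nginx get pods -o wide
    kubectl --context addons-248098 get pods,svc,ingress -n default
    out/minikube-linux-arm64 -p addons-248098 ssh "curl -sv --max-time 30 http://127.0.0.1/ -H 'Host: nginx.example.com'"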
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-248098
helpers_test.go:235: (dbg) docker inspect addons-248098:
-- stdout --
[
{
"Id": "71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776",
"Created": "2024-12-13T19:18:52.315159725Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 603478,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-12-13T19:18:52.484535042Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:7cd263f59e19eeefdb79b99186c433854c2243e3d7fa2988b2d817cac7fc54f8",
"ResolvConfPath": "/var/lib/docker/containers/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776/hostname",
"HostsPath": "/var/lib/docker/containers/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776/hosts",
"LogPath": "/var/lib/docker/containers/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776-json.log",
"Name": "/addons-248098",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-248098:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-248098",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/1eb625f67d65df01d611761ab1363a88e29135b4887423b64c40650d552835e4-init/diff:/var/lib/docker/overlay2/7f60ef155cdf2fdd139012aca07bc58fe52fb18f995aec2de9b3156cc93a5c4e/diff",
"MergedDir": "/var/lib/docker/overlay2/1eb625f67d65df01d611761ab1363a88e29135b4887423b64c40650d552835e4/merged",
"UpperDir": "/var/lib/docker/overlay2/1eb625f67d65df01d611761ab1363a88e29135b4887423b64c40650d552835e4/diff",
"WorkDir": "/var/lib/docker/overlay2/1eb625f67d65df01d611761ab1363a88e29135b4887423b64c40650d552835e4/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-248098",
"Source": "/var/lib/docker/volumes/addons-248098/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-248098",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-248098",
"name.minikube.sigs.k8s.io": "addons-248098",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "70c555dc0bf616658c39517ca754bbc8d0217eecb668e8d418b78ab6f8b69a36",
"SandboxKey": "/var/run/docker/netns/70c555dc0bf6",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33512"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33513"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33516"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33514"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33515"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-248098": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "26d751c067fc6e1d561dda56dbfe217bd324778a2878c8a088bc311c8b3eb10d",
"EndpointID": "01de513d376697fd43bead3e31bc9770fd3b8196e20a57de45d46140386899ce",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-248098",
"71118ff07ec6"
]
}
}
}
}
]
-- /stdout --
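The inspect dump above is collected wholesale by the post-mortem helper; the pieces the test flow actually relies on are the published 22/tcp host port (used for ssh) and the container's network address. As an illustrative aside, not a step the test performs, they can be read back directly with the same --format mechanism the cli_runner lines further down use:
    docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' addons-248098
    docker container inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' addons-248098
Against the state shown above these would print 33512 and 192.168.49.2.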
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-248098 -n addons-248098
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p addons-248098 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-248098 logs -n 25: (1.600483378s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | -p download-only-307056 | download-only-307056 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | 13 Dec 24 19:18 UTC |
| delete | -p download-only-161886 | download-only-161886 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | 13 Dec 24 19:18 UTC |
| start | --download-only -p | download-docker-972085 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | |
| | download-docker-972085 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p download-docker-972085 | download-docker-972085 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | 13 Dec 24 19:18 UTC |
| start | --download-only -p | binary-mirror-356185 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | |
| | binary-mirror-356185 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:34457 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p binary-mirror-356185 | binary-mirror-356185 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | 13 Dec 24 19:18 UTC |
| addons | disable dashboard -p | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | |
| | addons-248098 | | | | | |
| addons | enable dashboard -p | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | |
| | addons-248098 | | | | | |
| start | -p addons-248098 --wait=true | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:18 UTC | 13 Dec 24 19:21 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --addons=amd-gpu-device-plugin | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| addons | addons-248098 addons disable | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:21 UTC | 13 Dec 24 19:21 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons disable | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:21 UTC | 13 Dec 24 19:21 UTC |
| | gcp-auth --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | enable headlamp | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:21 UTC | 13 Dec 24 19:21 UTC |
| | -p addons-248098 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons disable | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:21 UTC | 13 Dec 24 19:22 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ip | addons-248098 ip | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| addons | addons-248098 addons disable | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-248098 addons disable | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| | disable nvidia-device-plugin | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-248098 ssh cat | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| | /opt/local-path-provisioner/pvc-3a3ae2c7-94c0-4b5c-a99c-675901123adf_default_test-pvc/file1 | | | | | |
| addons | addons-248098 addons | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| | disable cloud-spanner | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons disable | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:22 UTC | 13 Dec 24 19:22 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:23 UTC | 13 Dec 24 19:23 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:23 UTC | 13 Dec 24 19:23 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-248098 addons | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:23 UTC | 13 Dec 24 19:23 UTC |
| | disable inspektor-gadget | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-248098 ssh curl -s | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:23 UTC | |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-248098 ip | addons-248098 | jenkins | v1.34.0 | 13 Dec 24 19:25 UTC | 13 Dec 24 19:25 UTC |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/12/13 19:18:27
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.23.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1213 19:18:27.055781 602969 out.go:345] Setting OutFile to fd 1 ...
I1213 19:18:27.056002 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1213 19:18:27.056033 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:18:27.056058 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1213 19:18:27.056425 602969 root.go:338] Updating PATH: /home/jenkins/minikube-integration/20090-596807/.minikube/bin
I1213 19:18:27.057083 602969 out.go:352] Setting JSON to false
I1213 19:18:27.058049 602969 start.go:129] hostinfo: {"hostname":"ip-172-31-31-251","uptime":10823,"bootTime":1734106684,"procs":170,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1072-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I1213 19:18:27.058208 602969 start.go:139] virtualization:
I1213 19:18:27.061235 602969 out.go:177] * [addons-248098] minikube v1.34.0 on Ubuntu 20.04 (arm64)
I1213 19:18:27.064472 602969 out.go:177] - MINIKUBE_LOCATION=20090
I1213 19:18:27.064499 602969 notify.go:220] Checking for updates...
I1213 19:18:27.068685 602969 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1213 19:18:27.070671 602969 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/20090-596807/kubeconfig
I1213 19:18:27.073273 602969 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/20090-596807/.minikube
I1213 19:18:27.075308 602969 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I1213 19:18:27.077562 602969 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I1213 19:18:27.080283 602969 driver.go:394] Setting default libvirt URI to qemu:///system
I1213 19:18:27.115987 602969 docker.go:123] docker version: linux-27.4.0:Docker Engine - Community
I1213 19:18:27.116107 602969 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1213 19:18:27.170408 602969 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:true NGoroutines:44 SystemTime:2024-12-13 19:18:27.161534867 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1072-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0]] Warnings:<nil>}}
I1213 19:18:27.170522 602969 docker.go:318] overlay module found
I1213 19:18:27.172847 602969 out.go:177] * Using the docker driver based on user configuration
I1213 19:18:27.175263 602969 start.go:297] selected driver: docker
I1213 19:18:27.175290 602969 start.go:901] validating driver "docker" against <nil>
I1213 19:18:27.175322 602969 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1213 19:18:27.176042 602969 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1213 19:18:27.234146 602969 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:true NGoroutines:44 SystemTime:2024-12-13 19:18:27.225155419 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1072-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:27.4.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:88bf19b2105c8b17560993bee28a01ddc2f97182 Expected:88bf19b2105c8b17560993bee28a01ddc2f97182} RuncCommit:{ID:v1.2.2-0-g7cb3632 Expected:v1.2.2-0-g7cb3632} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.19.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.31.0]] Warnings:<nil>}}
I1213 19:18:27.234392 602969 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I1213 19:18:27.234624 602969 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1213 19:18:27.236939 602969 out.go:177] * Using Docker driver with root privileges
I1213 19:18:27.239129 602969 cni.go:84] Creating CNI manager for ""
I1213 19:18:27.239209 602969 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I1213 19:18:27.239231 602969 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I1213 19:18:27.239319 602969 start.go:340] cluster config:
{Name:addons-248098 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:addons-248098 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1213 19:18:27.243510 602969 out.go:177] * Starting "addons-248098" primary control-plane node in "addons-248098" cluster
I1213 19:18:27.245521 602969 cache.go:121] Beginning downloading kic base image for docker with crio
I1213 19:18:27.247755 602969 out.go:177] * Pulling base image v0.0.45-1734029593-20090 ...
I1213 19:18:27.249819 602969 preload.go:131] Checking if preload exists for k8s version v1.31.2 and runtime crio
I1213 19:18:27.249905 602969 preload.go:146] Found local preload: /home/jenkins/minikube-integration/20090-596807/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.2-cri-o-overlay-arm64.tar.lz4
I1213 19:18:27.249904 602969 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local docker daemon
I1213 19:18:27.249919 602969 cache.go:56] Caching tarball of preloaded images
I1213 19:18:27.250084 602969 preload.go:172] Found /home/jenkins/minikube-integration/20090-596807/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.2-cri-o-overlay-arm64.tar.lz4 in cache, skipping download
I1213 19:18:27.250176 602969 cache.go:59] Finished verifying existence of preloaded tar for v1.31.2 on crio
I1213 19:18:27.250719 602969 profile.go:143] Saving config to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/config.json ...
I1213 19:18:27.250768 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/config.json: {Name:mk4985bbfdf21426c540bab4f5039b3f705d29dc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:27.266401 602969 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 to local cache
I1213 19:18:27.266532 602969 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local cache directory
I1213 19:18:27.266554 602969 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 in local cache directory, skipping pull
I1213 19:18:27.266559 602969 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 exists in cache, skipping pull
I1213 19:18:27.266567 602969 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 as a tarball
I1213 19:18:27.266572 602969 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 from local cache
I1213 19:18:45.145346 602969 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 from cached tarball
I1213 19:18:45.145398 602969 cache.go:194] Successfully downloaded all kic artifacts
I1213 19:18:45.145432 602969 start.go:360] acquireMachinesLock for addons-248098: {Name:mk90cd79b2d7e9671af7af8749755f35a5159dc4 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1213 19:18:45.147734 602969 start.go:364] duration metric: took 2.261167ms to acquireMachinesLock for "addons-248098"
I1213 19:18:45.147808 602969 start.go:93] Provisioning new machine with config: &{Name:addons-248098 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:addons-248098 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:crio ControlPlane:true Worker:true}
I1213 19:18:45.147954 602969 start.go:125] createHost starting for "" (driver="docker")
I1213 19:18:45.160055 602969 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I1213 19:18:45.160417 602969 start.go:159] libmachine.API.Create for "addons-248098" (driver="docker")
I1213 19:18:45.160457 602969 client.go:168] LocalClient.Create starting
I1213 19:18:45.160603 602969 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca.pem
I1213 19:18:45.524939 602969 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/cert.pem
I1213 19:18:45.865688 602969 cli_runner.go:164] Run: docker network inspect addons-248098 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1213 19:18:45.887665 602969 cli_runner.go:211] docker network inspect addons-248098 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1213 19:18:45.887747 602969 network_create.go:284] running [docker network inspect addons-248098] to gather additional debugging logs...
I1213 19:18:45.887768 602969 cli_runner.go:164] Run: docker network inspect addons-248098
W1213 19:18:45.903678 602969 cli_runner.go:211] docker network inspect addons-248098 returned with exit code 1
I1213 19:18:45.903718 602969 network_create.go:287] error running [docker network inspect addons-248098]: docker network inspect addons-248098: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-248098 not found
I1213 19:18:45.903730 602969 network_create.go:289] output of [docker network inspect addons-248098]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-248098 not found
** /stderr **
I1213 19:18:45.903836 602969 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1213 19:18:45.920427 602969 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x4001ced260}
I1213 19:18:45.920475 602969 network_create.go:124] attempt to create docker network addons-248098 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I1213 19:18:45.920541 602969 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-248098 addons-248098
I1213 19:18:45.995385 602969 network_create.go:108] docker network addons-248098 192.168.49.0/24 created
I1213 19:18:45.995429 602969 kic.go:121] calculated static IP "192.168.49.2" for the "addons-248098" container
I1213 19:18:45.995509 602969 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1213 19:18:46.022595 602969 cli_runner.go:164] Run: docker volume create addons-248098 --label name.minikube.sigs.k8s.io=addons-248098 --label created_by.minikube.sigs.k8s.io=true
I1213 19:18:46.040850 602969 oci.go:103] Successfully created a docker volume addons-248098
I1213 19:18:46.040947 602969 cli_runner.go:164] Run: docker run --rm --name addons-248098-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-248098 --entrypoint /usr/bin/test -v addons-248098:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 -d /var/lib
I1213 19:18:48.146229 602969 cli_runner.go:217] Completed: docker run --rm --name addons-248098-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-248098 --entrypoint /usr/bin/test -v addons-248098:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 -d /var/lib: (2.10523956s)
I1213 19:18:48.146288 602969 oci.go:107] Successfully prepared a docker volume addons-248098
I1213 19:18:48.146330 602969 preload.go:131] Checking if preload exists for k8s version v1.31.2 and runtime crio
I1213 19:18:48.146358 602969 kic.go:194] Starting extracting preloaded images to volume ...
I1213 19:18:48.146436 602969 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20090-596807/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.2-cri-o-overlay-arm64.tar.lz4:/preloaded.tar:ro -v addons-248098:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 -I lz4 -xf /preloaded.tar -C /extractDir
I1213 19:18:52.242035 602969 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/20090-596807/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.2-cri-o-overlay-arm64.tar.lz4:/preloaded.tar:ro -v addons-248098:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 -I lz4 -xf /preloaded.tar -C /extractDir: (4.095557848s)
I1213 19:18:52.242067 602969 kic.go:203] duration metric: took 4.095714766s to extract preloaded images to volume ...
W1213 19:18:52.242215 602969 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1213 19:18:52.242404 602969 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1213 19:18:52.300006 602969 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-248098 --name addons-248098 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-248098 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-248098 --network addons-248098 --ip 192.168.49.2 --volume addons-248098:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9
I1213 19:18:52.684589 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Running}}
I1213 19:18:52.708745 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:18:52.731796 602969 cli_runner.go:164] Run: docker exec addons-248098 stat /var/lib/dpkg/alternatives/iptables
I1213 19:18:52.783069 602969 oci.go:144] the created container "addons-248098" has a running status.
I1213 19:18:52.783097 602969 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa...
I1213 19:18:53.755040 602969 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1213 19:18:53.776445 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:18:53.796248 602969 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1213 19:18:53.796269 602969 kic_runner.go:114] Args: [docker exec --privileged addons-248098 chown docker:docker /home/docker/.ssh/authorized_keys]
I1213 19:18:53.850724 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:18:53.868534 602969 machine.go:93] provisionDockerMachine start ...
I1213 19:18:53.868627 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:53.888235 602969 main.go:141] libmachine: Using SSH client type: native
I1213 19:18:53.888513 602969 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 33512 <nil> <nil>}
I1213 19:18:53.888529 602969 main.go:141] libmachine: About to run SSH command:
hostname
I1213 19:18:54.034254 602969 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-248098
I1213 19:18:54.034303 602969 ubuntu.go:169] provisioning hostname "addons-248098"
I1213 19:18:54.034373 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:54.054913 602969 main.go:141] libmachine: Using SSH client type: native
I1213 19:18:54.055181 602969 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 33512 <nil> <nil>}
I1213 19:18:54.055206 602969 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-248098 && echo "addons-248098" | sudo tee /etc/hostname
I1213 19:18:54.214170 602969 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-248098
I1213 19:18:54.214261 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:54.231476 602969 main.go:141] libmachine: Using SSH client type: native
I1213 19:18:54.231736 602969 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 33512 <nil> <nil>}
I1213 19:18:54.231760 602969 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-248098' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-248098/g' /etc/hosts;
else
echo '127.0.1.1 addons-248098' | sudo tee -a /etc/hosts;
fi
fi
I1213 19:18:54.378633 602969 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1213 19:18:54.378661 602969 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/20090-596807/.minikube CaCertPath:/home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/20090-596807/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/20090-596807/.minikube}
I1213 19:18:54.378688 602969 ubuntu.go:177] setting up certificates
I1213 19:18:54.378698 602969 provision.go:84] configureAuth start
I1213 19:18:54.378769 602969 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-248098
I1213 19:18:54.395597 602969 provision.go:143] copyHostCerts
I1213 19:18:54.395681 602969 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/20090-596807/.minikube/ca.pem (1082 bytes)
I1213 19:18:54.395809 602969 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/20090-596807/.minikube/cert.pem (1123 bytes)
I1213 19:18:54.395898 602969 exec_runner.go:151] cp: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/20090-596807/.minikube/key.pem (1679 bytes)
I1213 19:18:54.395967 602969 provision.go:117] generating server cert: /home/jenkins/minikube-integration/20090-596807/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca-key.pem org=jenkins.addons-248098 san=[127.0.0.1 192.168.49.2 addons-248098 localhost minikube]
I1213 19:18:54.809899 602969 provision.go:177] copyRemoteCerts
I1213 19:18:54.809970 602969 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1213 19:18:54.810013 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:54.827762 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:18:54.931460 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1213 19:18:54.956121 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I1213 19:18:54.980178 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1213 19:18:55.013876 602969 provision.go:87] duration metric: took 635.158103ms to configureAuth
I1213 19:18:55.013918 602969 ubuntu.go:193] setting minikube options for container-runtime
I1213 19:18:55.014153 602969 config.go:182] Loaded profile config "addons-248098": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.2
I1213 19:18:55.014302 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:55.071101 602969 main.go:141] libmachine: Using SSH client type: native
I1213 19:18:55.071374 602969 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x416340] 0x418b80 <nil> [] 0s} 127.0.0.1 33512 <nil> <nil>}
I1213 19:18:55.071398 602969 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %s "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
I1213 19:18:55.329830 602969 main.go:141] libmachine: SSH cmd err, output: <nil>:
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
I1213 19:18:55.329852 602969 machine.go:96] duration metric: took 1.461297288s to provisionDockerMachine
I1213 19:18:55.329863 602969 client.go:171] duration metric: took 10.169398436s to LocalClient.Create
I1213 19:18:55.329883 602969 start.go:167] duration metric: took 10.169469633s to libmachine.API.Create "addons-248098"
I1213 19:18:55.329891 602969 start.go:293] postStartSetup for "addons-248098" (driver="docker")
I1213 19:18:55.329901 602969 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1213 19:18:55.329970 602969 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1213 19:18:55.330017 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:55.347094 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:18:55.447547 602969 ssh_runner.go:195] Run: cat /etc/os-release
I1213 19:18:55.450755 602969 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1213 19:18:55.450790 602969 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1213 19:18:55.450804 602969 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1213 19:18:55.450812 602969 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1213 19:18:55.450823 602969 filesync.go:126] Scanning /home/jenkins/minikube-integration/20090-596807/.minikube/addons for local assets ...
I1213 19:18:55.450895 602969 filesync.go:126] Scanning /home/jenkins/minikube-integration/20090-596807/.minikube/files for local assets ...
I1213 19:18:55.450920 602969 start.go:296] duration metric: took 121.02358ms for postStartSetup
I1213 19:18:55.451245 602969 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-248098
I1213 19:18:55.467710 602969 profile.go:143] Saving config to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/config.json ...
I1213 19:18:55.468009 602969 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1213 19:18:55.468062 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:55.485705 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:18:55.583312 602969 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1213 19:18:55.588009 602969 start.go:128] duration metric: took 10.4400343s to createHost
I1213 19:18:55.588034 602969 start.go:83] releasing machines lock for "addons-248098", held for 10.440259337s
I1213 19:18:55.588121 602969 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-248098
I1213 19:18:55.604873 602969 ssh_runner.go:195] Run: cat /version.json
I1213 19:18:55.604925 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:55.605175 602969 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1213 19:18:55.605236 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:18:55.625747 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:18:55.641932 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:18:55.865572 602969 ssh_runner.go:195] Run: systemctl --version
I1213 19:18:55.869608 602969 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
I1213 19:18:56.023964 602969 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1213 19:18:56.028708 602969 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1213 19:18:56.050673 602969 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
I1213 19:18:56.050757 602969 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1213 19:18:56.083729 602969 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I1213 19:18:56.083750 602969 start.go:495] detecting cgroup driver to use...
I1213 19:18:56.083785 602969 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1213 19:18:56.083835 602969 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1213 19:18:56.099746 602969 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1213 19:18:56.111443 602969 docker.go:217] disabling cri-docker service (if available) ...
I1213 19:18:56.111553 602969 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1213 19:18:56.125763 602969 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1213 19:18:56.140789 602969 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1213 19:18:56.237908 602969 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1213 19:18:56.325624 602969 docker.go:233] disabling docker service ...
I1213 19:18:56.325743 602969 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1213 19:18:56.346209 602969 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1213 19:18:56.359581 602969 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1213 19:18:56.451957 602969 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1213 19:18:56.550085 602969 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1213 19:18:56.563345 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I1213 19:18:56.581145 602969 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.10" pause image...
I1213 19:18:56.581234 602969 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.10"|' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.592261 602969 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
I1213 19:18:56.592350 602969 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.603099 602969 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.613956 602969 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.624912 602969 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1213 19:18:56.634235 602969 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *"net.ipv4.ip_unprivileged_port_start=.*"/d' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.644471 602969 ssh_runner.go:195] Run: sh -c "sudo grep -q "^ *default_sysctls" /etc/crio/crio.conf.d/02-crio.conf || sudo sed -i '/conmon_cgroup = .*/a default_sysctls = \[\n\]' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.660595 602969 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^default_sysctls *= *\[|&\n "net.ipv4.ip_unprivileged_port_start=0",|' /etc/crio/crio.conf.d/02-crio.conf"
I1213 19:18:56.670646 602969 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1213 19:18:56.679462 602969 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1213 19:18:56.688378 602969 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1213 19:18:56.766966 602969 ssh_runner.go:195] Run: sudo systemctl restart crio
I1213 19:18:56.886569 602969 start.go:542] Will wait 60s for socket path /var/run/crio/crio.sock
I1213 19:18:56.886719 602969 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
I1213 19:18:56.890716 602969 start.go:563] Will wait 60s for crictl version
I1213 19:18:56.890833 602969 ssh_runner.go:195] Run: which crictl
I1213 19:18:56.894217 602969 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1213 19:18:56.932502 602969 start.go:579] Version: 0.1.0
RuntimeName: cri-o
RuntimeVersion: 1.24.6
RuntimeApiVersion: v1
I1213 19:18:56.932611 602969 ssh_runner.go:195] Run: crio --version
I1213 19:18:56.971355 602969 ssh_runner.go:195] Run: crio --version
I1213 19:18:57.021813 602969 out.go:177] * Preparing Kubernetes v1.31.2 on CRI-O 1.24.6 ...
I1213 19:18:57.024214 602969 cli_runner.go:164] Run: docker network inspect addons-248098 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1213 19:18:57.042490 602969 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1213 19:18:57.046587 602969 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1213 19:18:57.059450 602969 kubeadm.go:883] updating cluster {Name:addons-248098 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:addons-248098 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1213 19:18:57.059574 602969 preload.go:131] Checking if preload exists for k8s version v1.31.2 and runtime crio
I1213 19:18:57.059643 602969 ssh_runner.go:195] Run: sudo crictl images --output json
I1213 19:18:57.137675 602969 crio.go:514] all images are preloaded for cri-o runtime.
I1213 19:18:57.137698 602969 crio.go:433] Images already preloaded, skipping extraction
I1213 19:18:57.137761 602969 ssh_runner.go:195] Run: sudo crictl images --output json
I1213 19:18:57.173787 602969 crio.go:514] all images are preloaded for cri-o runtime.
I1213 19:18:57.173812 602969 cache_images.go:84] Images are preloaded, skipping loading
I1213 19:18:57.173820 602969 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.2 crio true true} ...
I1213 19:18:57.173921 602969 kubeadm.go:946] kubelet [Unit]
Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.2/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --enforce-node-allocatable= --hostname-override=addons-248098 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.2 ClusterName:addons-248098 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I1213 19:18:57.174003 602969 ssh_runner.go:195] Run: crio config
I1213 19:18:57.222356 602969 cni.go:84] Creating CNI manager for ""
I1213 19:18:57.222379 602969 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I1213 19:18:57.222389 602969 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I1213 19:18:57.222411 602969 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.2 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-248098 NodeName:addons-248098 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/crio/crio.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1213 19:18:57.222539 602969 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/crio/crio.sock
name: "addons-248098"
kubeletExtraArgs:
- name: "node-ip"
value: "192.168.49.2"
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
- name: "enable-admission-plugins"
value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
- name: "allocate-node-cidrs"
value: "true"
- name: "leader-elect"
value: "false"
scheduler:
extraArgs:
- name: "leader-elect"
value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
- name: "proxy-refresh-interval"
value: "70000"
kubernetesVersion: v1.31.2
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
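As an aside, a generated config like the dump above can be sanity-checked before the real init; a minimal sketch, assuming the /var/tmp/minikube/kubeadm.yaml path this log copies the file to a few lines below:

    # dry-run kubeadm against the generated config to surface validation errors
    # (run inside the minikube node; --dry-run leaves the node untouched)
    sudo kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run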
I1213 19:18:57.222611 602969 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.2
I1213 19:18:57.231575 602969 binaries.go:44] Found k8s binaries, skipping transfer
I1213 19:18:57.231687 602969 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1213 19:18:57.240565 602969 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (363 bytes)
I1213 19:18:57.259184 602969 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1213 19:18:57.277514 602969 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2287 bytes)
I1213 19:18:57.297096 602969 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I1213 19:18:57.300762 602969 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1213 19:18:57.311998 602969 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1213 19:18:57.393593 602969 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1213 19:18:57.407504 602969 certs.go:68] Setting up /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098 for IP: 192.168.49.2
I1213 19:18:57.407530 602969 certs.go:194] generating shared ca certs ...
I1213 19:18:57.407547 602969 certs.go:226] acquiring lock for ca certs: {Name:mk3cdd0ea94f7f906448b193b6df25da3e2261b6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:57.407685 602969 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/20090-596807/.minikube/ca.key
I1213 19:18:57.753657 602969 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20090-596807/.minikube/ca.crt ...
I1213 19:18:57.753689 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/ca.crt: {Name:mkd47ec227d5a0a992364ca75af37df461bf8251 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:57.754556 602969 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20090-596807/.minikube/ca.key ...
I1213 19:18:57.754574 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/ca.key: {Name:mk99e7ab436fef1f7051dabcc331ea2d120ce21b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:57.754673 602969 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.key
I1213 19:18:57.965859 602969 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.crt ...
I1213 19:18:57.965891 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.crt: {Name:mkd3882d2ccf5bff7977b8f91ec4b985ade96ca8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:57.966508 602969 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.key ...
I1213 19:18:57.966527 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.key: {Name:mk9f1e77620da4f62399f28c89e1e49e6502ff2a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:57.966625 602969 certs.go:256] generating profile certs ...
I1213 19:18:57.966697 602969 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/client.key
I1213 19:18:57.966723 602969 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/client.crt with IP's: []
I1213 19:18:58.272499 602969 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/client.crt ...
I1213 19:18:58.272535 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/client.crt: {Name:mk65d52d2f3cffee39c58a204c5c86169e26beed Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:58.273970 602969 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/client.key ...
I1213 19:18:58.273989 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/client.key: {Name:mk7cf318e896508552eb82f0ebadb2445f7082e5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:58.274084 602969 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.key.2386a425
I1213 19:18:58.274106 602969 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.crt.2386a425 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I1213 19:18:58.651536 602969 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.crt.2386a425 ...
I1213 19:18:58.651567 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.crt.2386a425: {Name:mke53ea42652e58e64dcdd4b89ef7f4a4a14f85c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:58.652283 602969 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.key.2386a425 ...
I1213 19:18:58.652304 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.key.2386a425: {Name:mke16db69a70a4e768d2fcef5a36f02309bb7b5b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:58.652951 602969 certs.go:381] copying /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.crt.2386a425 -> /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.crt
I1213 19:18:58.653040 602969 certs.go:385] copying /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.key.2386a425 -> /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.key
I1213 19:18:58.653091 602969 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.key
I1213 19:18:58.653112 602969 crypto.go:68] Generating cert /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.crt with IP's: []
I1213 19:18:58.926757 602969 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.crt ...
I1213 19:18:58.926786 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.crt: {Name:mkd3bdca2f1c30fa6d033d08e64b97c34b1ee90f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:58.927544 602969 crypto.go:164] Writing key to /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.key ...
I1213 19:18:58.927566 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.key: {Name:mk13a5ea1b680a0acc1fb9a90733ee1b8d555e62 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:18:58.927773 602969 certs.go:484] found cert: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca-key.pem (1675 bytes)
I1213 19:18:58.927819 602969 certs.go:484] found cert: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/ca.pem (1082 bytes)
I1213 19:18:58.927848 602969 certs.go:484] found cert: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/cert.pem (1123 bytes)
I1213 19:18:58.927877 602969 certs.go:484] found cert: /home/jenkins/minikube-integration/20090-596807/.minikube/certs/key.pem (1679 bytes)
I1213 19:18:58.928547 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1213 19:18:58.976754 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1213 19:18:59.020377 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1213 19:18:59.047812 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1213 19:18:59.073635 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I1213 19:18:59.099274 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1213 19:18:59.124829 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1213 19:18:59.150551 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/profiles/addons-248098/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1213 19:18:59.175603 602969 ssh_runner.go:362] scp /home/jenkins/minikube-integration/20090-596807/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1213 19:18:59.200255 602969 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1213 19:18:59.218487 602969 ssh_runner.go:195] Run: openssl version
I1213 19:18:59.224085 602969 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1213 19:18:59.233761 602969 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1213 19:18:59.237304 602969 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Dec 13 19:18 /usr/share/ca-certificates/minikubeCA.pem
I1213 19:18:59.237375 602969 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1213 19:18:59.244920 602969 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
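The b5213941.0 symlink name used above is OpenSSL's subject-name hash of the minikube CA; a sketch of reproducing it inside the node with the same certificate path shown in the log:

    # prints the hash that names the /etc/ssl/certs/<hash>.0 symlink, e.g. b5213941
    openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem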
I1213 19:18:59.254323 602969 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1213 19:18:59.257610 602969 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1213 19:18:59.257675 602969 kubeadm.go:392] StartCluster: {Name:addons-248098 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1734029593-20090@sha256:7b3f6168a578563fb342f21f0c926652b91ba060931e8fbc6c6ade3ac1d26ed9 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.2 ClusterName:addons-248098 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1213 19:18:59.257768 602969 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
I1213 19:18:59.257862 602969 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1213 19:18:59.301228 602969 cri.go:89] found id: ""
I1213 19:18:59.301305 602969 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1213 19:18:59.310353 602969 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1213 19:18:59.319841 602969 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I1213 19:18:59.319904 602969 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1213 19:18:59.328815 602969 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1213 19:18:59.328839 602969 kubeadm.go:157] found existing configuration files:
I1213 19:18:59.328891 602969 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1213 19:18:59.338594 602969 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1213 19:18:59.338663 602969 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1213 19:18:59.347744 602969 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1213 19:18:59.356911 602969 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1213 19:18:59.356991 602969 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1213 19:18:59.365506 602969 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1213 19:18:59.374420 602969 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1213 19:18:59.374491 602969 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1213 19:18:59.383136 602969 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1213 19:18:59.392580 602969 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1213 19:18:59.392655 602969 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1213 19:18:59.401105 602969 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.2:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1213 19:18:59.449629 602969 kubeadm.go:310] [init] Using Kubernetes version: v1.31.2
I1213 19:18:59.449991 602969 kubeadm.go:310] [preflight] Running pre-flight checks
I1213 19:18:59.470150 602969 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I1213 19:18:59.470313 602969 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1072-aws
I1213 19:18:59.470373 602969 kubeadm.go:310] OS: Linux
I1213 19:18:59.470453 602969 kubeadm.go:310] CGROUPS_CPU: enabled
I1213 19:18:59.470524 602969 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I1213 19:18:59.470597 602969 kubeadm.go:310] CGROUPS_CPUSET: enabled
I1213 19:18:59.470667 602969 kubeadm.go:310] CGROUPS_DEVICES: enabled
I1213 19:18:59.470745 602969 kubeadm.go:310] CGROUPS_FREEZER: enabled
I1213 19:18:59.470813 602969 kubeadm.go:310] CGROUPS_MEMORY: enabled
I1213 19:18:59.470888 602969 kubeadm.go:310] CGROUPS_PIDS: enabled
I1213 19:18:59.470957 602969 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I1213 19:18:59.471051 602969 kubeadm.go:310] CGROUPS_BLKIO: enabled
I1213 19:18:59.528975 602969 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I1213 19:18:59.529091 602969 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1213 19:18:59.529189 602969 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1213 19:18:59.536172 602969 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1213 19:18:59.540072 602969 out.go:235] - Generating certificates and keys ...
I1213 19:18:59.540200 602969 kubeadm.go:310] [certs] Using existing ca certificate authority
I1213 19:18:59.540286 602969 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I1213 19:19:00.246678 602969 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I1213 19:19:00.785838 602969 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I1213 19:19:01.636131 602969 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I1213 19:19:02.024791 602969 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I1213 19:19:02.790385 602969 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I1213 19:19:02.790765 602969 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-248098 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1213 19:19:03.407514 602969 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I1213 19:19:03.407674 602969 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-248098 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1213 19:19:04.222280 602969 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I1213 19:19:04.641177 602969 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I1213 19:19:05.202907 602969 kubeadm.go:310] [certs] Generating "sa" key and public key
I1213 19:19:05.203140 602969 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1213 19:19:06.009479 602969 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I1213 19:19:06.181840 602969 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1213 19:19:07.103019 602969 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1213 19:19:07.437209 602969 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1213 19:19:08.145533 602969 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1213 19:19:08.146133 602969 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1213 19:19:08.151058 602969 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1213 19:19:08.153615 602969 out.go:235] - Booting up control plane ...
I1213 19:19:08.153730 602969 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1213 19:19:08.153813 602969 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1213 19:19:08.154897 602969 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1213 19:19:08.164962 602969 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1213 19:19:08.172281 602969 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1213 19:19:08.172339 602969 kubeadm.go:310] [kubelet-start] Starting the kubelet
I1213 19:19:08.257882 602969 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1213 19:19:08.258008 602969 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1213 19:19:09.259532 602969 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001649368s
I1213 19:19:09.259630 602969 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I1213 19:19:15.761647 602969 kubeadm.go:310] [api-check] The API server is healthy after 6.502180938s
I1213 19:19:15.781085 602969 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1213 19:19:15.796568 602969 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1213 19:19:15.828071 602969 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I1213 19:19:15.828279 602969 kubeadm.go:310] [mark-control-plane] Marking the node addons-248098 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1213 19:19:15.843567 602969 kubeadm.go:310] [bootstrap-token] Using token: j5o3j6.zgtne4vwby5cxh24
I1213 19:19:15.845663 602969 out.go:235] - Configuring RBAC rules ...
I1213 19:19:15.845800 602969 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1213 19:19:15.851702 602969 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1213 19:19:15.859463 602969 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1213 19:19:15.863643 602969 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1213 19:19:15.867638 602969 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1213 19:19:15.872771 602969 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1213 19:19:16.168591 602969 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1213 19:19:16.629547 602969 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I1213 19:19:17.174865 602969 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I1213 19:19:17.174896 602969 kubeadm.go:310]
I1213 19:19:17.174968 602969 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I1213 19:19:17.174973 602969 kubeadm.go:310]
I1213 19:19:17.175100 602969 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I1213 19:19:17.175112 602969 kubeadm.go:310]
I1213 19:19:17.175138 602969 kubeadm.go:310] mkdir -p $HOME/.kube
I1213 19:19:17.175211 602969 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1213 19:19:17.175304 602969 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1213 19:19:17.175319 602969 kubeadm.go:310]
I1213 19:19:17.175382 602969 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I1213 19:19:17.175388 602969 kubeadm.go:310]
I1213 19:19:17.175454 602969 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I1213 19:19:17.175463 602969 kubeadm.go:310]
I1213 19:19:17.175520 602969 kubeadm.go:310] You should now deploy a pod network to the cluster.
I1213 19:19:17.175628 602969 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1213 19:19:17.175704 602969 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1213 19:19:17.175709 602969 kubeadm.go:310]
I1213 19:19:17.175822 602969 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I1213 19:19:17.175932 602969 kubeadm.go:310] and service account keys on each node and then running the following as root:
I1213 19:19:17.175945 602969 kubeadm.go:310]
I1213 19:19:17.176058 602969 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token j5o3j6.zgtne4vwby5cxh24 \
I1213 19:19:17.176186 602969 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:3a4ff1c2a595792db2f2ca4f26d9011086ca3d6e4619c022e611d1580ec6ebd4 \
I1213 19:19:17.176222 602969 kubeadm.go:310] --control-plane
I1213 19:19:17.176233 602969 kubeadm.go:310]
I1213 19:19:17.176328 602969 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I1213 19:19:17.176337 602969 kubeadm.go:310]
I1213 19:19:17.176420 602969 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token j5o3j6.zgtne4vwby5cxh24 \
I1213 19:19:17.176556 602969 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:3a4ff1c2a595792db2f2ca4f26d9011086ca3d6e4619c022e611d1580ec6ebd4
I1213 19:19:17.176811 602969 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1072-aws\n", err: exit status 1
I1213 19:19:17.176946 602969 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
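For reference, the --discovery-token-ca-cert-hash value in the join commands above is the SHA-256 of the cluster CA's public key; a sketch of recomputing it, assuming the /var/lib/minikube/certs/ca.crt path used earlier in this log:

    # recompute the discovery token CA cert hash printed by kubeadm init
    # (prepend "sha256:" to the output to match the join command above)
    openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
      | openssl rsa -pubin -outform der 2>/dev/null \
      | openssl dgst -sha256 -hex \
      | sed 's/^.* //'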
I1213 19:19:17.176973 602969 cni.go:84] Creating CNI manager for ""
I1213 19:19:17.176982 602969 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I1213 19:19:17.180500 602969 out.go:177] * Configuring CNI (Container Networking Interface) ...
I1213 19:19:17.182504 602969 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1213 19:19:17.186376 602969 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.31.2/kubectl ...
I1213 19:19:17.186397 602969 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1213 19:19:17.205884 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1213 19:19:17.486806 602969 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1213 19:19:17.486941 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:17.487025 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-248098 minikube.k8s.io/updated_at=2024_12_13T19_19_17_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=68ea3eca706f73191794a96e3518c1d004192956 minikube.k8s.io/name=addons-248098 minikube.k8s.io/primary=true
I1213 19:19:17.495888 602969 ops.go:34] apiserver oom_adj: -16
I1213 19:19:17.641903 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:18.141946 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:18.642947 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:19.142076 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:19.642844 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:20.141993 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:20.642024 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:21.142539 602969 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.2/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1213 19:19:21.228991 602969 kubeadm.go:1113] duration metric: took 3.742096798s to wait for elevateKubeSystemPrivileges
I1213 19:19:21.229030 602969 kubeadm.go:394] duration metric: took 21.971375826s to StartCluster
I1213 19:19:21.229051 602969 settings.go:142] acquiring lock: {Name:mka9b7535bd979f27733ffa8cb9f79579fa32ca5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:19:21.229190 602969 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/20090-596807/kubeconfig
I1213 19:19:21.229583 602969 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/20090-596807/kubeconfig: {Name:mka5435b4dfc150b8392bc985a52cf22d376e8bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1213 19:19:21.230376 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1213 19:19:21.230408 602969 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.2 ContainerRuntime:crio ControlPlane:true Worker:true}
I1213 19:19:21.230640 602969 config.go:182] Loaded profile config "addons-248098": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.2
I1213 19:19:21.230676 602969 addons.go:507] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:true auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
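The toEnable map above drives the per-addon setup that follows; once start completes, the same enabled/disabled state can be inspected with the addons subcommand (profile name taken from this log), e.g.:

    # list addon status for this profile after the cluster is up
    minikube addons list -p addons-248098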
I1213 19:19:21.230746 602969 addons.go:69] Setting yakd=true in profile "addons-248098"
I1213 19:19:21.230759 602969 addons.go:234] Setting addon yakd=true in "addons-248098"
I1213 19:19:21.230782 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.231257 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.231623 602969 addons.go:69] Setting inspektor-gadget=true in profile "addons-248098"
I1213 19:19:21.231651 602969 addons.go:234] Setting addon inspektor-gadget=true in "addons-248098"
I1213 19:19:21.231687 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.232164 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.232320 602969 addons.go:69] Setting metrics-server=true in profile "addons-248098"
I1213 19:19:21.232341 602969 addons.go:234] Setting addon metrics-server=true in "addons-248098"
I1213 19:19:21.232366 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.232771 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.233276 602969 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-248098"
I1213 19:19:21.233302 602969 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-248098"
I1213 19:19:21.233330 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.233747 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.235082 602969 addons.go:69] Setting amd-gpu-device-plugin=true in profile "addons-248098"
I1213 19:19:21.235115 602969 addons.go:234] Setting addon amd-gpu-device-plugin=true in "addons-248098"
I1213 19:19:21.235145 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.235594 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.236644 602969 addons.go:69] Setting registry=true in profile "addons-248098"
I1213 19:19:21.236672 602969 addons.go:234] Setting addon registry=true in "addons-248098"
I1213 19:19:21.236702 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.237136 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.240546 602969 addons.go:69] Setting cloud-spanner=true in profile "addons-248098"
I1213 19:19:21.240607 602969 addons.go:234] Setting addon cloud-spanner=true in "addons-248098"
I1213 19:19:21.240646 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.241341 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.255273 602969 addons.go:69] Setting storage-provisioner=true in profile "addons-248098"
I1213 19:19:21.255307 602969 addons.go:234] Setting addon storage-provisioner=true in "addons-248098"
I1213 19:19:21.255344 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.255819 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.255999 602969 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-248098"
I1213 19:19:21.256040 602969 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-248098"
I1213 19:19:21.256063 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.265178 602969 addons.go:69] Setting default-storageclass=true in profile "addons-248098"
I1213 19:19:21.265277 602969 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-248098"
I1213 19:19:21.266138 602969 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-248098"
I1213 19:19:21.266224 602969 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-248098"
I1213 19:19:21.266617 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.266936 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.280307 602969 addons.go:69] Setting gcp-auth=true in profile "addons-248098"
I1213 19:19:21.284706 602969 mustload.go:65] Loading cluster: addons-248098
I1213 19:19:21.284944 602969 config.go:182] Loaded profile config "addons-248098": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.2
I1213 19:19:21.285251 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.290520 602969 addons.go:69] Setting volcano=true in profile "addons-248098"
I1213 19:19:21.290615 602969 addons.go:234] Setting addon volcano=true in "addons-248098"
I1213 19:19:21.290692 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.291333 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.299441 602969 addons.go:69] Setting ingress=true in profile "addons-248098"
I1213 19:19:21.299520 602969 addons.go:234] Setting addon ingress=true in "addons-248098"
I1213 19:19:21.299632 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.300592 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.310583 602969 addons.go:69] Setting volumesnapshots=true in profile "addons-248098"
I1213 19:19:21.310623 602969 addons.go:234] Setting addon volumesnapshots=true in "addons-248098"
I1213 19:19:21.310661 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.311152 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.322127 602969 addons.go:69] Setting ingress-dns=true in profile "addons-248098"
I1213 19:19:21.322221 602969 addons.go:234] Setting addon ingress-dns=true in "addons-248098"
I1213 19:19:21.322359 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.322982 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.325448 602969 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1213 19:19:21.328554 602969 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1213 19:19:21.328581 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1213 19:19:21.328649 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.333576 602969 out.go:177] * Verifying Kubernetes components...
I1213 19:19:21.353965 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.380274 602969 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1213 19:19:21.412609 602969 out.go:177] - Using image docker.io/rocm/k8s-device-plugin:1.25.2.8
I1213 19:19:21.420870 602969 addons.go:431] installing /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
I1213 19:19:21.420936 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/amd-gpu-device-plugin.yaml (1868 bytes)
I1213 19:19:21.421040 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.438711 602969 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.17.0
I1213 19:19:21.439164 602969 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I1213 19:19:21.441663 602969 out.go:177] - Using image docker.io/registry:2.8.3
I1213 19:19:21.442042 602969 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.35.0
I1213 19:19:21.447223 602969 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I1213 19:19:21.448274 602969 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I1213 19:19:21.448331 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I1213 19:19:21.448420 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.459755 602969 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.25
I1213 19:19:21.462611 602969 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I1213 19:19:21.462680 602969 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I1213 19:19:21.462784 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.463994 602969 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I1213 19:19:21.464049 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I1213 19:19:21.464133 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.482413 602969 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I1213 19:19:21.482669 602969 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I1213 19:19:21.482683 602969 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5248 bytes)
I1213 19:19:21.482759 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.498395 602969 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.3
I1213 19:19:21.503674 602969 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
I1213 19:19:21.508350 602969 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
I1213 19:19:21.508597 602969 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1213 19:19:21.508617 602969 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1213 19:19:21.508684 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.483797 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.495853 602969 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-248098"
I1213 19:19:21.510702 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.511144 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
W1213 19:19:21.523070 602969 out.go:270] ! Enabling 'volcano' returned an error: running callbacks: [volcano addon does not support crio]
I1213 19:19:21.495897 602969 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I1213 19:19:21.525493 602969 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I1213 19:19:21.525560 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.531786 602969 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I1213 19:19:21.534286 602969 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I1213 19:19:21.534309 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I1213 19:19:21.534383 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.546429 602969 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.8
I1213 19:19:21.548949 602969 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I1213 19:19:21.548974 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I1213 19:19:21.549037 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.570396 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.577265 602969 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I1213 19:19:21.577286 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I1213 19:19:21.577348 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.591406 602969 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I1213 19:19:21.593808 602969 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I1213 19:19:21.595784 602969 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I1213 19:19:21.603024 602969 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I1213 19:19:21.605198 602969 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I1213 19:19:21.607893 602969 addons.go:234] Setting addon default-storageclass=true in "addons-248098"
I1213 19:19:21.607928 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:21.608339 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:21.610620 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.613014 602969 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I1213 19:19:21.615144 602969 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I1213 19:19:21.615254 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.623841 602969 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I1213 19:19:21.630322 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I1213 19:19:21.630356 602969 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I1213 19:19:21.630446 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.643406 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.715448 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.716808 602969 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I1213 19:19:21.719377 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.720893 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.722077 602969 out.go:177] - Using image docker.io/busybox:stable
I1213 19:19:21.724551 602969 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I1213 19:19:21.724577 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I1213 19:19:21.724644 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.786959 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.806260 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.807337 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.813786 602969 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I1213 19:19:21.813806 602969 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1213 19:19:21.813880 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:21.815114 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.831472 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.837880 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.856326 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1213 19:19:21.869818 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:21.923870 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
I1213 19:19:22.001811 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I1213 19:19:22.058636 602969 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I1213 19:19:22.058671 602969 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I1213 19:19:22.180538 602969 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1213 19:19:22.180560 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I1213 19:19:22.187506 602969 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I1213 19:19:22.187588 602969 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I1213 19:19:22.221301 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I1213 19:19:22.221406 602969 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I1213 19:19:22.245357 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I1213 19:19:22.251332 602969 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I1213 19:19:22.251408 602969 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I1213 19:19:22.272828 602969 addons.go:431] installing /etc/kubernetes/addons/ig-deployment.yaml
I1213 19:19:22.272865 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-deployment.yaml (14576 bytes)
I1213 19:19:22.287633 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I1213 19:19:22.292810 602969 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I1213 19:19:22.292893 602969 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I1213 19:19:22.302161 602969 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1213 19:19:22.302249 602969 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1213 19:19:22.308889 602969 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I1213 19:19:22.308959 602969 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I1213 19:19:22.367800 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I1213 19:19:22.367888 602969 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I1213 19:19:22.381548 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I1213 19:19:22.418969 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1213 19:19:22.427226 602969 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1213 19:19:22.427295 602969 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1213 19:19:22.460529 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I1213 19:19:22.487312 602969 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I1213 19:19:22.487419 602969 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I1213 19:19:22.492906 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I1213 19:19:22.492991 602969 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I1213 19:19:22.502366 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I1213 19:19:22.513766 602969 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I1213 19:19:22.513843 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I1213 19:19:22.550994 602969 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I1213 19:19:22.551082 602969 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I1213 19:19:22.576625 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1213 19:19:22.622394 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I1213 19:19:22.622461 602969 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I1213 19:19:22.667331 602969 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I1213 19:19:22.667409 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I1213 19:19:22.686529 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I1213 19:19:22.727043 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I1213 19:19:22.727128 602969 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I1213 19:19:22.730868 602969 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml": (1.500445437s)
I1213 19:19:22.730988 602969 ssh_runner.go:235] Completed: sudo systemctl daemon-reload: (1.350672368s)
I1213 19:19:22.731177 602969 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1213 19:19:22.731216 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1213 19:19:22.814545 602969 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I1213 19:19:22.814622 602969 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I1213 19:19:22.866479 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I1213 19:19:22.952807 602969 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1213 19:19:22.952888 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I1213 19:19:23.027535 602969 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I1213 19:19:23.027616 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I1213 19:19:23.096867 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1213 19:19:23.140904 602969 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I1213 19:19:23.140974 602969 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I1213 19:19:23.213097 602969 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I1213 19:19:23.213164 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I1213 19:19:23.284483 602969 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I1213 19:19:23.284559 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I1213 19:19:23.313960 602969 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I1213 19:19:23.314030 602969 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I1213 19:19:23.403457 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I1213 19:19:26.482868 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml: (4.558964552s)
I1213 19:19:26.482971 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (4.481077701s)
I1213 19:19:26.483045 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (4.237614796s)
I1213 19:19:26.483100 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (4.195445671s)
I1213 19:19:26.483176 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (4.626823049s)
I1213 19:19:26.653244 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (4.271604446s)
I1213 19:19:26.653524 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (4.234460463s)
W1213 19:19:26.730600 602969 out.go:270] ! Enabling 'storage-provisioner-rancher' returned an error: running callbacks: [Error making local-path the default storage class: Error while marking storage class local-path as default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
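The warning above is an optimistic-concurrency conflict: the addon tried to mark the freshly created local-path StorageClass as the default while another writer was updating the same object, so the annotation write was rejected. A minimal manual recovery, assuming the standard default-class annotation and the class name taken from the error message (not something this run actually executed), would be to re-apply the patch against the latest object version:

  # mark local-path as the default StorageClass (hypothetical manual fix)
  kubectl --context addons-248098 patch storageclass local-path \
    -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
  # confirm which class now carries the (default) marker
  kubectl --context addons-248098 get storageclass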
I1213 19:19:28.119649 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (5.659025174s)
I1213 19:19:28.119679 602969 addons.go:475] Verifying addon ingress=true in "addons-248098"
I1213 19:19:28.119926 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (5.617462835s)
I1213 19:19:28.119999 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (5.543296224s)
I1213 19:19:28.120008 602969 addons.go:475] Verifying addon metrics-server=true in "addons-248098"
I1213 19:19:28.120034 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (5.433440279s)
I1213 19:19:28.120042 602969 addons.go:475] Verifying addon registry=true in "addons-248098"
I1213 19:19:28.120316 602969 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.2/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (5.389064075s)
I1213 19:19:28.120347 602969 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
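The pipeline that just completed rewrites the coredns ConfigMap so that host.minikube.internal resolves to the gateway address 192.168.49.1 from inside the cluster. A quick way to confirm the record landed, using only standard kubectl (the throwaway lookup pod is illustrative, not part of this run):

  # inspect the patched Corefile
  kubectl --context addons-248098 -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
  # resolve the injected name from a short-lived busybox pod
  kubectl --context addons-248098 run dns-check --rm -it --restart=Never \
    --image=docker.io/busybox:stable -- nslookup host.minikube.internal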
I1213 19:19:28.121375 602969 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (5.390177239s)
I1213 19:19:28.122131 602969 node_ready.go:35] waiting up to 6m0s for node "addons-248098" to be "Ready" ...
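This wait tracks the node's Ready condition; the same check can be reproduced directly with kubectl, assuming the same 6-minute budget used here:

  kubectl --context addons-248098 wait --for=condition=Ready node/addons-248098 --timeout=6m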
I1213 19:19:28.122346 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (5.255778687s)
I1213 19:19:28.122682 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (5.025719663s)
W1213 19:19:28.122720 602969 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I1213 19:19:28.122738 602969 retry.go:31] will retry after 368.887977ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
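The apply being retried failed because of an ordering race: the VolumeSnapshotClass object is submitted in the same batch that creates its CRD, and the API server had not yet established the new type when the object was validated. A sketch of how to avoid the race with plain kubectl, waiting for the snapshot CRDs to become Established before re-running the failed file (the in-node kubectl path and manifest location are taken from the log above):

  # cluster-level check, can run from the host
  kubectl --context addons-248098 wait --for=condition=established --timeout=60s \
    crd/volumesnapshotclasses.snapshot.storage.k8s.io \
    crd/volumesnapshotcontents.snapshot.storage.k8s.io \
    crd/volumesnapshots.snapshot.storage.k8s.io
  # re-run the failed apply from inside the node, where the addon manifests live
  minikube -p addons-248098 ssh -- sudo KUBECONFIG=/var/lib/minikube/kubeconfig \
    /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml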
I1213 19:19:28.122848 602969 out.go:177] * Verifying ingress addon...
I1213 19:19:28.122940 602969 out.go:177] * Verifying registry addon...
I1213 19:19:28.125945 602969 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-248098 service yakd-dashboard -n yakd-dashboard
I1213 19:19:28.126854 602969 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I1213 19:19:28.127952 602969 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I1213 19:19:28.178514 602969 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=registry
I1213 19:19:28.178607 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:28.181483 602969 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I1213 19:19:28.181566 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:28.491896 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1213 19:19:28.653877 602969 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-248098" context rescaled to 1 replicas
I1213 19:19:28.656611 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:28.656806 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:28.999336 602969 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (5.595772838s)
I1213 19:19:28.999422 602969 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-248098"
I1213 19:19:29.004093 602969 out.go:177] * Verifying csi-hostpath-driver addon...
I1213 19:19:29.007703 602969 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
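Besides polling the driver pods by label, the CSI registration itself can be checked through the CSIDriver object that csi-hostpath-driverinfo.yaml creates (its exact name is not shown in this log, so the command below simply lists all registered drivers):

  kubectl --context addons-248098 get csidrivers.storage.k8s.io
  kubectl --context addons-248098 -n kube-system get pods -l kubernetes.io/minikube-addons=csi-hostpath-driver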
I1213 19:19:29.027443 602969 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I1213 19:19:29.027466 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:29.141297 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:29.142221 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:29.512286 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:29.634043 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:29.635325 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:30.018614 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:30.126763 602969 node_ready.go:53] node "addons-248098" has status "Ready":"False"
I1213 19:19:30.141230 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:30.144333 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:30.511781 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:30.631473 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:30.632040 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:31.013033 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:31.131738 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:31.132589 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:31.512185 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:31.631006 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:31.631908 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:31.862556 602969 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I1213 19:19:31.862644 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:31.881197 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:31.993912 602969 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I1213 19:19:32.015203 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:32.029154 602969 addons.go:234] Setting addon gcp-auth=true in "addons-248098"
I1213 19:19:32.029264 602969 host.go:66] Checking if "addons-248098" exists ...
I1213 19:19:32.029785 602969 cli_runner.go:164] Run: docker container inspect addons-248098 --format={{.State.Status}}
I1213 19:19:32.059441 602969 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I1213 19:19:32.059508 602969 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-248098
I1213 19:19:32.078992 602969 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33512 SSHKeyPath:/home/jenkins/minikube-integration/20090-596807/.minikube/machines/addons-248098/id_rsa Username:docker}
I1213 19:19:32.131417 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:32.131771 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:32.132258 602969 node_ready.go:53] node "addons-248098" has status "Ready":"False"
I1213 19:19:32.192934 602969 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
I1213 19:19:32.195358 602969 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.3
I1213 19:19:32.197626 602969 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I1213 19:19:32.197657 602969 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I1213 19:19:32.216983 602969 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I1213 19:19:32.217006 602969 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I1213 19:19:32.235808 602969 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I1213 19:19:32.235833 602969 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I1213 19:19:32.255597 602969 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.2/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I1213 19:19:32.513233 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:32.636785 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:32.637301 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:32.775691 602969 addons.go:475] Verifying addon gcp-auth=true in "addons-248098"
I1213 19:19:32.779925 602969 out.go:177] * Verifying gcp-auth addon...
I1213 19:19:32.785024 602969 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
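The same readiness check can be followed interactively with the label selector and namespace from the line above:

  kubectl --context addons-248098 -n gcp-auth get pods -l kubernetes.io/minikube-addons=gcp-auth --watch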
I1213 19:19:32.816723 602969 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I1213 19:19:32.816751 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:33.018884 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:33.131804 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:33.133704 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:33.289115 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:33.511882 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:33.631923 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:33.632439 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:33.788543 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:34.012574 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:34.131105 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:34.131521 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:34.288721 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:34.511845 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:34.626878 602969 node_ready.go:53] node "addons-248098" has status "Ready":"False"
I1213 19:19:34.631341 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:34.633312 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:34.789417 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:35.015921 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:35.131195 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:35.131570 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:35.289725 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:35.511913 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:35.631394 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:35.632893 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:35.788325 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:36.012571 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:36.131216 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:36.132275 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:36.288541 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:36.512198 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:36.631774 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:36.633266 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:36.788674 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:37.014562 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:37.125705 602969 node_ready.go:53] node "addons-248098" has status "Ready":"False"
I1213 19:19:37.131666 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:37.132381 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:37.289036 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:37.512079 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:37.631486 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:37.632552 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:37.788860 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:38.013292 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:38.131696 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:38.132255 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:38.288818 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:38.511431 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:38.631017 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:38.631919 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:38.789005 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:39.013018 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:39.125807 602969 node_ready.go:53] node "addons-248098" has status "Ready":"False"
I1213 19:19:39.131912 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:39.132342 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:39.288913 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:39.546103 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:39.634124 602969 node_ready.go:49] node "addons-248098" has status "Ready":"True"
I1213 19:19:39.634153 602969 node_ready.go:38] duration metric: took 11.511992619s for node "addons-248098" to be "Ready" ...
I1213 19:19:39.634164 602969 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1213 19:19:39.648967 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:39.653911 602969 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-bt6ls" in "kube-system" namespace to be "Ready" ...
I1213 19:19:39.656285 602969 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I1213 19:19:39.656313 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:39.871358 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:40.068754 602969 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I1213 19:19:40.068783 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:40.175155 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:40.176729 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:40.324497 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:40.514084 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:40.631680 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:40.632493 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:40.794699 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:41.015953 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:41.132088 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:41.132663 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:41.160673 602969 pod_ready.go:93] pod "coredns-7c65d6cfc9-bt6ls" in "kube-system" namespace has status "Ready":"True"
I1213 19:19:41.160700 602969 pod_ready.go:82] duration metric: took 1.506750951s for pod "coredns-7c65d6cfc9-bt6ls" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.160728 602969 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.167909 602969 pod_ready.go:93] pod "etcd-addons-248098" in "kube-system" namespace has status "Ready":"True"
I1213 19:19:41.167936 602969 pod_ready.go:82] duration metric: took 7.198218ms for pod "etcd-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.167950 602969 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.175836 602969 pod_ready.go:93] pod "kube-apiserver-addons-248098" in "kube-system" namespace has status "Ready":"True"
I1213 19:19:41.175861 602969 pod_ready.go:82] duration metric: took 7.896877ms for pod "kube-apiserver-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.175876 602969 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.188852 602969 pod_ready.go:93] pod "kube-controller-manager-addons-248098" in "kube-system" namespace has status "Ready":"True"
I1213 19:19:41.188879 602969 pod_ready.go:82] duration metric: took 12.994611ms for pod "kube-controller-manager-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.188894 602969 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-rcbrb" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.232429 602969 pod_ready.go:93] pod "kube-proxy-rcbrb" in "kube-system" namespace has status "Ready":"True"
I1213 19:19:41.232451 602969 pod_ready.go:82] duration metric: took 43.55018ms for pod "kube-proxy-rcbrb" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.232462 602969 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.289689 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:41.513507 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:41.626175 602969 pod_ready.go:93] pod "kube-scheduler-addons-248098" in "kube-system" namespace has status "Ready":"True"
I1213 19:19:41.626347 602969 pod_ready.go:82] duration metric: took 393.875067ms for pod "kube-scheduler-addons-248098" in "kube-system" namespace to be "Ready" ...
I1213 19:19:41.626366 602969 pod_ready.go:79] waiting up to 6m0s for pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace to be "Ready" ...
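metrics-server is usually the slowest of these pods to report Ready, since it typically becomes Ready only after a first successful scrape of kubelet metrics. Once the poll below shows Ready, the metrics API begins serving, which can be verified with:

  kubectl --context addons-248098 top nodes
  kubectl --context addons-248098 top pods -A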
I1213 19:19:41.635558 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:41.637958 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:41.788507 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:42.026734 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:42.137519 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:42.139399 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:42.289568 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:42.515967 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:42.649268 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:42.651606 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:42.789418 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:43.014152 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:43.133188 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:43.134981 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:43.290443 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:43.512663 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:43.632511 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:43.634455 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:43.635299 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:43.789909 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:44.014063 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:44.133266 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:44.134645 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:44.288648 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:44.512023 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:44.644352 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:44.646135 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:44.792681 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:45.023186 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:45.149208 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:45.150477 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:45.291751 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:45.512953 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:45.637673 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:45.638589 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:45.640894 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:45.796714 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:46.016964 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:46.143385 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:46.145482 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:46.289284 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:46.512969 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:46.637122 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:46.641525 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:46.788321 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:47.016793 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:47.139112 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:47.141348 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:47.288749 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:47.513840 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:47.633496 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:47.637674 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:47.789373 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:48.015282 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:48.133087 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:48.134694 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:48.136494 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:48.289117 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:48.512513 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:48.653344 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:48.660119 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:48.790039 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:49.014997 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:49.144771 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:49.148704 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:49.289337 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:49.512915 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:49.637487 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:49.637748 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:49.795640 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:50.033774 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:50.144261 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:50.144566 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:50.146679 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:50.288390 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:50.514008 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:50.648275 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:50.649898 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:50.788546 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:51.014168 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:51.134498 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:51.135723 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:51.293382 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:51.514369 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:51.637001 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:51.639183 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:51.789516 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:52.018454 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:52.132634 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:52.134129 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:52.289744 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:52.512798 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:52.647245 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:52.648787 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:52.653226 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:52.788935 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:53.014878 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:53.135610 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:53.138325 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:53.289605 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:53.513675 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:53.633387 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:53.636095 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:53.788718 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:54.020268 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:54.132212 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:54.132739 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:54.288627 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:54.513437 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:54.639017 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:54.639361 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:54.788600 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:55.019780 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:55.135485 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:55.136975 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:55.143199 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:55.288914 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:55.514550 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:55.633239 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:55.634475 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:55.789007 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:56.017594 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:56.133967 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:56.134589 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:56.288756 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:56.512643 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:56.632760 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:56.635279 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:56.788398 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:57.013827 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:57.132172 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:57.133877 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:57.288213 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:57.513044 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:57.633465 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:57.634971 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:57.638124 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:57.789388 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:58.013941 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:58.133369 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:58.134953 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:58.289323 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:58.513659 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:58.633687 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:58.636209 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:58.789326 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:59.013883 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:59.155798 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:59.156038 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:59.288594 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:19:59.512330 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:19:59.638783 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:19:59.640044 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:19:59.640781 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:19:59.790902 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:00.070081 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:00.156179 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:00.166602 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:00.316992 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:00.527241 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:00.676815 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:00.712768 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:00.847142 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:01.020393 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:01.152526 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:01.167867 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:01.289311 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:01.512968 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:01.634331 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:01.637221 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:01.789851 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:02.029159 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:02.137616 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:02.148018 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:02.150699 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:02.289957 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:02.521936 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:02.635028 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:02.640003 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:02.791133 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:03.015522 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:03.132739 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:03.133141 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:03.290347 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:03.512827 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:03.635478 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:03.636481 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:03.790544 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:04.014368 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:04.135801 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:04.137715 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:04.289648 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:04.512571 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:04.640686 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:04.642543 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:04.644580 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:04.812287 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:05.044546 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:05.136402 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:05.143417 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:05.289020 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:05.514026 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:05.638751 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:05.640261 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:05.793173 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:06.015932 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:06.133981 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:06.135052 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:06.288936 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:06.513436 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:06.633907 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:06.635179 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:06.789073 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:07.015510 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:07.136087 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:07.136169 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:07.139564 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:07.289141 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:07.513160 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:07.633094 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:07.634880 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:07.790913 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:08.014398 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:08.137505 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:08.140139 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:08.289634 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:08.513350 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:08.632550 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:08.634494 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:08.788437 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:09.016508 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:09.139393 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:09.141910 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:09.155680 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:09.289744 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:09.514429 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:09.634616 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:09.635300 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:09.793078 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:10.026954 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:10.132998 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:10.134060 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:10.289024 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:10.513687 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:10.633539 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:10.634473 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:10.788547 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:11.014898 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:11.135610 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:11.138976 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:11.288860 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:11.513364 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:11.632145 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:11.633274 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:11.633863 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:11.789034 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:12.023861 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:12.139256 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:12.140575 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:12.289201 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:12.517252 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:12.633790 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:12.635523 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:12.789376 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:13.016912 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:13.139113 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:13.141565 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:13.288941 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:13.518018 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:13.632009 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:13.634862 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:13.634895 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:13.788636 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:14.018063 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:14.135813 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:14.136337 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1213 19:20:14.289217 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:14.513284 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:14.633631 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:14.635788 602969 kapi.go:107] duration metric: took 46.507833164s to wait for kubernetes.io/minikube-addons=registry ...
I1213 19:20:14.789381 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:15.029174 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:15.141655 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:15.289034 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:15.513478 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:15.633942 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:15.789331 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:16.015948 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:16.136565 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:16.145005 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:16.288915 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:16.513700 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:16.636525 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:16.789727 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:17.014867 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:17.137578 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:17.289841 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:17.513784 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:17.633105 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:17.789484 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:18.022624 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:18.140248 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:18.288599 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:18.513057 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:18.632575 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:18.640037 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:18.789504 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:19.022468 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:19.133212 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:19.289041 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:19.513478 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:19.632476 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:19.789055 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:20.020624 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:20.143687 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:20.290244 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:20.513839 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:20.633022 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:20.789103 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:21.030957 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:21.137112 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:21.139362 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:21.288758 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:21.513599 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:21.634029 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:21.789807 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:22.017041 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:22.133097 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:22.304824 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:22.513267 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:22.633313 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:22.788346 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:23.017822 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:23.131759 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:23.289610 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:23.513266 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:23.636653 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:23.638376 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:23.789193 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:24.020867 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:24.135162 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:24.290797 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:24.521966 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:24.642084 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:24.788736 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:25.015145 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:25.135198 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:25.289972 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:25.513394 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:25.634048 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:25.789882 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:26.014546 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:26.136141 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:26.146013 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:26.293030 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:26.518439 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:26.634951 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:26.792262 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:27.013288 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:27.132968 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:27.289216 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:27.514447 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:27.632103 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:27.789207 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:28.017253 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:28.145210 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:28.289132 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:28.520569 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:28.635024 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:28.635875 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:28.789278 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:29.014206 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:29.139383 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:29.288706 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:29.513883 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:29.634664 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:29.791618 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:30.020353 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:30.139727 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:30.288465 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:30.515253 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:30.635961 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:30.637639 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:30.789132 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:31.015337 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:31.134245 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:31.289615 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:31.514090 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:31.633246 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:31.789185 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:32.015503 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:32.131557 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:32.289416 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:32.518303 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:32.640843 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:32.643531 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:32.789628 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:33.018720 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:33.134361 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:33.288610 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:33.512882 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:33.633328 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:33.789183 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:34.013826 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:34.133333 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:34.288683 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:34.513055 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:34.638394 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:34.789863 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:35.018056 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:35.134152 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:35.134873 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:35.288402 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:35.514464 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:35.641587 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:35.790447 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:36.034212 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:36.136737 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:36.288750 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:36.514366 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:36.638336 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:36.800939 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:37.025466 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:37.137327 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:37.138588 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:37.289196 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:37.512883 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:37.652347 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:37.791008 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:38.014218 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:38.134012 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:38.300160 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:38.523258 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:38.647934 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:38.804079 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:39.066002 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:39.214225 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:39.228628 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:39.293700 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:39.513347 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:39.632635 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:39.793893 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:40.034033 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:40.142980 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:40.290171 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:40.512997 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:40.637209 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:40.789419 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:41.015513 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:41.134702 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:41.292884 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:41.513085 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:41.639119 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:41.645783 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:41.790097 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:42.015437 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:42.133077 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:42.289343 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:42.513468 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:42.634784 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:42.789568 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:43.024691 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:43.134400 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:43.291754 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:43.516872 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:43.635803 602969 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1213 19:20:43.790316 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:44.015891 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:44.133459 602969 kapi.go:107] duration metric: took 1m16.006613767s to wait for app.kubernetes.io/name=ingress-nginx ...
I1213 19:20:44.136564 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:44.289895 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:44.512860 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:44.789321 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:45.099748 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:45.304635 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:45.513890 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:45.789078 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:46.017143 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:46.289593 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:46.512358 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:46.634782 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:46.789287 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:47.013249 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:47.289667 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:47.513740 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:47.789566 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:48.033765 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:48.292904 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:48.515082 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:48.636139 602969 pod_ready.go:103] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"False"
I1213 19:20:48.789523 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1213 19:20:49.013959 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:49.290131 602969 kapi.go:107] duration metric: took 1m16.505104895s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I1213 19:20:49.292413 602969 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-248098 cluster.
I1213 19:20:49.294959 602969 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I1213 19:20:49.297342 602969 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
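A minimal sketch of the opt-out the gcp-auth messages above describe, assuming kubectl is pointed at this cluster; the pod name, image, and the label value "true" are illustrative placeholders, since the messages only state that a label with the gcp-auth-skip-secret key must be present on the pod before it is created:

# Create a pod that the gcp-auth webhook should leave unmodified
# (label key taken from the addon message above; name, image, and
# label value are assumptions for illustration only).
kubectl --context addons-248098 apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: gcp-auth-skip-example
  labels:
    gcp-auth-skip-secret: "true"
spec:
  containers:
  - name: app
    image: nginx
EOF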
I1213 19:20:49.512561 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:50.051929 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:50.159213 602969 pod_ready.go:93] pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace has status "Ready":"True"
I1213 19:20:50.159240 602969 pod_ready.go:82] duration metric: took 1m8.532866479s for pod "metrics-server-84c5f94fbc-g7jcr" in "kube-system" namespace to be "Ready" ...
I1213 19:20:50.159254 602969 pod_ready.go:79] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-xsrsn" in "kube-system" namespace to be "Ready" ...
I1213 19:20:50.185362 602969 pod_ready.go:93] pod "nvidia-device-plugin-daemonset-xsrsn" in "kube-system" namespace has status "Ready":"True"
I1213 19:20:50.185387 602969 pod_ready.go:82] duration metric: took 26.125113ms for pod "nvidia-device-plugin-daemonset-xsrsn" in "kube-system" namespace to be "Ready" ...
I1213 19:20:50.185410 602969 pod_ready.go:39] duration metric: took 1m10.551212061s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1213 19:20:50.185430 602969 api_server.go:52] waiting for apiserver process to appear ...
I1213 19:20:50.185462 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1213 19:20:50.185531 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1213 19:20:50.294525 602969 cri.go:89] found id: "27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d"
I1213 19:20:50.294549 602969 cri.go:89] found id: ""
I1213 19:20:50.294557 602969 logs.go:282] 1 containers: [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d]
I1213 19:20:50.294618 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.304608 602969 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1213 19:20:50.304682 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1213 19:20:50.362212 602969 cri.go:89] found id: "289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a"
I1213 19:20:50.362235 602969 cri.go:89] found id: ""
I1213 19:20:50.362243 602969 logs.go:282] 1 containers: [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a]
I1213 19:20:50.362329 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.366049 602969 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1213 19:20:50.366120 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1213 19:20:50.470834 602969 cri.go:89] found id: "d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411"
I1213 19:20:50.470859 602969 cri.go:89] found id: ""
I1213 19:20:50.470867 602969 logs.go:282] 1 containers: [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411]
I1213 19:20:50.470921 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.503447 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1213 19:20:50.503522 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1213 19:20:50.517428 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:50.605090 602969 cri.go:89] found id: "833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f"
I1213 19:20:50.605116 602969 cri.go:89] found id: ""
I1213 19:20:50.605134 602969 logs.go:282] 1 containers: [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f]
I1213 19:20:50.605196 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.610821 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1213 19:20:50.610898 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1213 19:20:50.690567 602969 cri.go:89] found id: "1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102"
I1213 19:20:50.690647 602969 cri.go:89] found id: ""
I1213 19:20:50.690662 602969 logs.go:282] 1 containers: [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102]
I1213 19:20:50.690732 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.695050 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1213 19:20:50.695158 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1213 19:20:50.741497 602969 cri.go:89] found id: "4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a"
I1213 19:20:50.741522 602969 cri.go:89] found id: ""
I1213 19:20:50.741531 602969 logs.go:282] 1 containers: [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a]
I1213 19:20:50.741591 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.745570 602969 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1213 19:20:50.745648 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1213 19:20:50.791676 602969 cri.go:89] found id: "da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3"
I1213 19:20:50.791699 602969 cri.go:89] found id: ""
I1213 19:20:50.791707 602969 logs.go:282] 1 containers: [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3]
I1213 19:20:50.791768 602969 ssh_runner.go:195] Run: which crictl
I1213 19:20:50.802647 602969 logs.go:123] Gathering logs for kubelet ...
I1213 19:20:50.802675 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1213 19:20:50.885177 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:23 addons-248098 kubelet[1527]: W1213 19:19:23.865745 1527 reflector.go:561] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-248098' and this object
W1213 19:20:50.885587 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:23 addons-248098 kubelet[1527]: E1213 19:19:23.865827 1527 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
W1213 19:20:50.911543 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:39 addons-248098 kubelet[1527]: W1213 19:19:39.477016 1527 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-248098' and this object
W1213 19:20:50.911946 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:39 addons-248098 kubelet[1527]: E1213 19:19:39.477065 1527 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
I1213 19:20:50.975179 602969 logs.go:123] Gathering logs for describe nodes ...
I1213 19:20:50.975311 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1213 19:20:51.015960 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:51.218135 602969 logs.go:123] Gathering logs for kube-apiserver [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d] ...
I1213 19:20:51.218208 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d"
I1213 19:20:51.297330 602969 logs.go:123] Gathering logs for etcd [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a] ...
I1213 19:20:51.297371 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a"
I1213 19:20:51.364315 602969 logs.go:123] Gathering logs for kube-proxy [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102] ...
I1213 19:20:51.364352 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102"
I1213 19:20:51.419594 602969 logs.go:123] Gathering logs for CRI-O ...
I1213 19:20:51.419625 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I1213 19:20:51.513154 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:51.528485 602969 logs.go:123] Gathering logs for dmesg ...
I1213 19:20:51.528569 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1213 19:20:51.546548 602969 logs.go:123] Gathering logs for coredns [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411] ...
I1213 19:20:51.546579 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411"
I1213 19:20:51.597667 602969 logs.go:123] Gathering logs for kube-scheduler [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f] ...
I1213 19:20:51.597699 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f"
I1213 19:20:51.650955 602969 logs.go:123] Gathering logs for kube-controller-manager [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a] ...
I1213 19:20:51.651038 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a"
I1213 19:20:51.747175 602969 logs.go:123] Gathering logs for kindnet [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3] ...
I1213 19:20:51.747210 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3"
I1213 19:20:51.795094 602969 logs.go:123] Gathering logs for container status ...
I1213 19:20:51.795127 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1213 19:20:51.851701 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:20:51.851732 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1213 19:20:51.851816 602969 out.go:270] X Problems detected in kubelet:
W1213 19:20:51.851832 602969 out.go:270] Dec 13 19:19:23 addons-248098 kubelet[1527]: W1213 19:19:23.865745 1527 reflector.go:561] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-248098' and this object
W1213 19:20:51.851839 602969 out.go:270] Dec 13 19:19:23 addons-248098 kubelet[1527]: E1213 19:19:23.865827 1527 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
W1213 19:20:51.851850 602969 out.go:270] Dec 13 19:19:39 addons-248098 kubelet[1527]: W1213 19:19:39.477016 1527 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-248098' and this object
W1213 19:20:51.851875 602969 out.go:270] Dec 13 19:19:39 addons-248098 kubelet[1527]: E1213 19:19:39.477065 1527 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
I1213 19:20:51.851882 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:20:51.851888 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
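[annotation] The repeated kube-root-ca.crt warnings flagged above are list/watch failures from the kubelet that occur before the node-to-object relationship is recorded by the API server's node authorizer; they typically clear on their own. A rough manual check that the ConfigMap is readable afterwards (command assumed, not part of this run):
$ kubectl --context addons-248098 get configmap kube-root-ca.crt -n kube-system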
I1213 19:20:52.014612 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:52.515749 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:53.014362 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:53.513704 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:54.014702 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:54.513764 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:55.015201 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:55.584150 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:56.014613 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:56.513381 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:57.013544 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:57.514445 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:58.026351 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:58.514303 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:59.012890 602969 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1213 19:20:59.512881 602969 kapi.go:107] duration metric: took 1m30.505185227s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I1213 19:20:59.515617 602969 out.go:177] * Enabled addons: amd-gpu-device-plugin, nvidia-device-plugin, ingress-dns, cloud-spanner, storage-provisioner, default-storageclass, inspektor-gadget, metrics-server, yakd, volumesnapshots, registry, ingress, gcp-auth, csi-hostpath-driver
I1213 19:20:59.518027 602969 addons.go:510] duration metric: took 1m38.287350638s for enable addons: enabled=[amd-gpu-device-plugin nvidia-device-plugin ingress-dns cloud-spanner storage-provisioner default-storageclass inspektor-gadget metrics-server yakd volumesnapshots registry ingress gcp-auth csi-hostpath-driver]
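[annotation] For reference, the addon set reported above can be inspected or toggled manually against the same profile; a sketch using standard minikube commands (not taken from this run):
$ minikube -p addons-248098 addons list
$ minikube -p addons-248098 addons enable csi-hostpath-driver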
I1213 19:21:01.853210 602969 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1213 19:21:01.867505 602969 api_server.go:72] duration metric: took 1m40.637063007s to wait for apiserver process to appear ...
I1213 19:21:01.867534 602969 api_server.go:88] waiting for apiserver healthz status ...
I1213 19:21:01.868050 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1213 19:21:01.868129 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1213 19:21:01.908108 602969 cri.go:89] found id: "27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d"
I1213 19:21:01.908133 602969 cri.go:89] found id: ""
I1213 19:21:01.908141 602969 logs.go:282] 1 containers: [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d]
I1213 19:21:01.908199 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:01.912369 602969 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1213 19:21:01.912453 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1213 19:21:01.952191 602969 cri.go:89] found id: "289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a"
I1213 19:21:01.952214 602969 cri.go:89] found id: ""
I1213 19:21:01.952223 602969 logs.go:282] 1 containers: [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a]
I1213 19:21:01.952279 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:01.955874 602969 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1213 19:21:01.955949 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1213 19:21:01.995630 602969 cri.go:89] found id: "d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411"
I1213 19:21:01.995655 602969 cri.go:89] found id: ""
I1213 19:21:01.995663 602969 logs.go:282] 1 containers: [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411]
I1213 19:21:01.995723 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:01.999503 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1213 19:21:01.999589 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1213 19:21:02.046099 602969 cri.go:89] found id: "833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f"
I1213 19:21:02.046123 602969 cri.go:89] found id: ""
I1213 19:21:02.046131 602969 logs.go:282] 1 containers: [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f]
I1213 19:21:02.046193 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:02.050255 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1213 19:21:02.050412 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1213 19:21:02.092267 602969 cri.go:89] found id: "1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102"
I1213 19:21:02.092292 602969 cri.go:89] found id: ""
I1213 19:21:02.092300 602969 logs.go:282] 1 containers: [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102]
I1213 19:21:02.092389 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:02.096421 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1213 19:21:02.096586 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1213 19:21:02.137435 602969 cri.go:89] found id: "4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a"
I1213 19:21:02.137511 602969 cri.go:89] found id: ""
I1213 19:21:02.137535 602969 logs.go:282] 1 containers: [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a]
I1213 19:21:02.137622 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:02.141668 602969 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1213 19:21:02.141786 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1213 19:21:02.183547 602969 cri.go:89] found id: "da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3"
I1213 19:21:02.183576 602969 cri.go:89] found id: ""
I1213 19:21:02.183585 602969 logs.go:282] 1 containers: [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3]
I1213 19:21:02.183701 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:02.188103 602969 logs.go:123] Gathering logs for etcd [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a] ...
I1213 19:21:02.188132 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a"
I1213 19:21:02.241298 602969 logs.go:123] Gathering logs for kube-scheduler [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f] ...
I1213 19:21:02.241332 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f"
I1213 19:21:02.289740 602969 logs.go:123] Gathering logs for kubelet ...
I1213 19:21:02.289774 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1213 19:21:02.345674 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:23 addons-248098 kubelet[1527]: W1213 19:19:23.865745 1527 reflector.go:561] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-248098' and this object
W1213 19:21:02.345950 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:23 addons-248098 kubelet[1527]: E1213 19:19:23.865827 1527 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
W1213 19:21:02.361436 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:39 addons-248098 kubelet[1527]: W1213 19:19:39.477016 1527 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-248098' and this object
W1213 19:21:02.361671 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:39 addons-248098 kubelet[1527]: E1213 19:19:39.477065 1527 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
I1213 19:21:02.401354 602969 logs.go:123] Gathering logs for dmesg ...
I1213 19:21:02.401382 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1213 19:21:02.419350 602969 logs.go:123] Gathering logs for describe nodes ...
I1213 19:21:02.419387 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1213 19:21:02.572424 602969 logs.go:123] Gathering logs for kube-apiserver [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d] ...
I1213 19:21:02.572457 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d"
I1213 19:21:02.641829 602969 logs.go:123] Gathering logs for CRI-O ...
I1213 19:21:02.641866 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I1213 19:21:02.736850 602969 logs.go:123] Gathering logs for container status ...
I1213 19:21:02.736888 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1213 19:21:02.788016 602969 logs.go:123] Gathering logs for coredns [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411] ...
I1213 19:21:02.788052 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411"
I1213 19:21:02.830967 602969 logs.go:123] Gathering logs for kube-proxy [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102] ...
I1213 19:21:02.830999 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102"
I1213 19:21:02.869566 602969 logs.go:123] Gathering logs for kube-controller-manager [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a] ...
I1213 19:21:02.869595 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a"
I1213 19:21:02.939984 602969 logs.go:123] Gathering logs for kindnet [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3] ...
I1213 19:21:02.940026 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3"
I1213 19:21:02.992970 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:21:02.993000 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1213 19:21:02.993059 602969 out.go:270] X Problems detected in kubelet:
W1213 19:21:02.993200 602969 out.go:270] Dec 13 19:19:23 addons-248098 kubelet[1527]: W1213 19:19:23.865745 1527 reflector.go:561] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-248098' and this object
W1213 19:21:02.993218 602969 out.go:270] Dec 13 19:19:23 addons-248098 kubelet[1527]: E1213 19:19:23.865827 1527 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
W1213 19:21:02.993230 602969 out.go:270] Dec 13 19:19:39 addons-248098 kubelet[1527]: W1213 19:19:39.477016 1527 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-248098' and this object
W1213 19:21:02.993247 602969 out.go:270] Dec 13 19:19:39 addons-248098 kubelet[1527]: E1213 19:19:39.477065 1527 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
I1213 19:21:02.993261 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:21:02.993268 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1213 19:21:12.994733 602969 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I1213 19:21:13.008029 602969 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I1213 19:21:13.011610 602969 api_server.go:141] control plane version: v1.31.2
I1213 19:21:13.011651 602969 api_server.go:131] duration metric: took 11.144109173s to wait for apiserver health ...
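[annotation] A rough manual equivalent of the healthz probe above, using kubectl's raw API access against the same context (the expected body is the "ok" already shown in the log):
$ kubectl --context addons-248098 get --raw /healthz
ok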
I1213 19:21:13.011662 602969 system_pods.go:43] waiting for kube-system pods to appear ...
I1213 19:21:13.011689 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1213 19:21:13.011757 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1213 19:21:13.055983 602969 cri.go:89] found id: "27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d"
I1213 19:21:13.056004 602969 cri.go:89] found id: ""
I1213 19:21:13.056012 602969 logs.go:282] 1 containers: [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d]
I1213 19:21:13.056076 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.060197 602969 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1213 19:21:13.060272 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1213 19:21:13.114407 602969 cri.go:89] found id: "289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a"
I1213 19:21:13.114431 602969 cri.go:89] found id: ""
I1213 19:21:13.114438 602969 logs.go:282] 1 containers: [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a]
I1213 19:21:13.114500 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.118390 602969 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1213 19:21:13.118525 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1213 19:21:13.162684 602969 cri.go:89] found id: "d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411"
I1213 19:21:13.162717 602969 cri.go:89] found id: ""
I1213 19:21:13.162726 602969 logs.go:282] 1 containers: [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411]
I1213 19:21:13.162789 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.166866 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1213 19:21:13.166956 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1213 19:21:13.220934 602969 cri.go:89] found id: "833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f"
I1213 19:21:13.220980 602969 cri.go:89] found id: ""
I1213 19:21:13.220989 602969 logs.go:282] 1 containers: [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f]
I1213 19:21:13.221090 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.228707 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1213 19:21:13.228829 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1213 19:21:13.289311 602969 cri.go:89] found id: "1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102"
I1213 19:21:13.289342 602969 cri.go:89] found id: ""
I1213 19:21:13.289352 602969 logs.go:282] 1 containers: [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102]
I1213 19:21:13.289424 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.294609 602969 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1213 19:21:13.294728 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1213 19:21:13.366508 602969 cri.go:89] found id: "4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a"
I1213 19:21:13.366557 602969 cri.go:89] found id: ""
I1213 19:21:13.366567 602969 logs.go:282] 1 containers: [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a]
I1213 19:21:13.366656 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.372576 602969 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1213 19:21:13.372670 602969 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1213 19:21:13.416348 602969 cri.go:89] found id: "da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3"
I1213 19:21:13.416425 602969 cri.go:89] found id: ""
I1213 19:21:13.416449 602969 logs.go:282] 1 containers: [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3]
I1213 19:21:13.416529 602969 ssh_runner.go:195] Run: which crictl
I1213 19:21:13.420352 602969 logs.go:123] Gathering logs for kube-scheduler [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f] ...
I1213 19:21:13.420391 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f"
I1213 19:21:13.474383 602969 logs.go:123] Gathering logs for kindnet [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3] ...
I1213 19:21:13.474428 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3"
I1213 19:21:13.522419 602969 logs.go:123] Gathering logs for CRI-O ...
I1213 19:21:13.522452 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I1213 19:21:13.617192 602969 logs.go:123] Gathering logs for dmesg ...
I1213 19:21:13.617230 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1213 19:21:13.634890 602969 logs.go:123] Gathering logs for describe nodes ...
I1213 19:21:13.634918 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.2/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1213 19:21:13.784911 602969 logs.go:123] Gathering logs for coredns [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411] ...
I1213 19:21:13.784948 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411"
I1213 19:21:13.828683 602969 logs.go:123] Gathering logs for kube-proxy [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102] ...
I1213 19:21:13.828720 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102"
I1213 19:21:13.868810 602969 logs.go:123] Gathering logs for kube-controller-manager [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a] ...
I1213 19:21:13.868851 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a"
I1213 19:21:13.966616 602969 logs.go:123] Gathering logs for container status ...
I1213 19:21:13.966653 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1213 19:21:14.036058 602969 logs.go:123] Gathering logs for kubelet ...
I1213 19:21:14.036098 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1213 19:21:14.100263 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:23 addons-248098 kubelet[1527]: W1213 19:19:23.865745 1527 reflector.go:561] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-248098' and this object
W1213 19:21:14.100509 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:23 addons-248098 kubelet[1527]: E1213 19:19:23.865827 1527 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
W1213 19:21:14.115979 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:39 addons-248098 kubelet[1527]: W1213 19:19:39.477016 1527 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-248098' and this object
W1213 19:21:14.116213 602969 logs.go:138] Found kubelet problem: Dec 13 19:19:39 addons-248098 kubelet[1527]: E1213 19:19:39.477065 1527 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
I1213 19:21:14.156559 602969 logs.go:123] Gathering logs for kube-apiserver [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d] ...
I1213 19:21:14.156587 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d"
I1213 19:21:14.211656 602969 logs.go:123] Gathering logs for etcd [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a] ...
I1213 19:21:14.211697 602969 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a"
I1213 19:21:14.260185 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:21:14.260216 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1213 19:21:14.260273 602969 out.go:270] X Problems detected in kubelet:
W1213 19:21:14.260311 602969 out.go:270] Dec 13 19:19:23 addons-248098 kubelet[1527]: W1213 19:19:23.865745 1527 reflector.go:561] object-"kube-system"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-248098' and this object
W1213 19:21:14.260318 602969 out.go:270] Dec 13 19:19:23 addons-248098 kubelet[1527]: E1213 19:19:23.865827 1527 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
W1213 19:21:14.260325 602969 out.go:270] Dec 13 19:19:39 addons-248098 kubelet[1527]: W1213 19:19:39.477016 1527 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-248098" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-248098' and this object
W1213 19:21:14.260332 602969 out.go:270] Dec 13 19:19:39 addons-248098 kubelet[1527]: E1213 19:19:39.477065 1527 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-248098\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-248098' and this object" logger="UnhandledError"
I1213 19:21:14.260337 602969 out.go:358] Setting ErrFile to fd 2...
I1213 19:21:14.260346 602969 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1213 19:21:24.272192 602969 system_pods.go:59] 18 kube-system pods found
I1213 19:21:24.272235 602969 system_pods.go:61] "coredns-7c65d6cfc9-bt6ls" [23b8e6b9-51eb-4a14-bee8-7eacdb154832] Running
I1213 19:21:24.272242 602969 system_pods.go:61] "csi-hostpath-attacher-0" [98592c8c-f15c-40c5-831b-2239874143ea] Running
I1213 19:21:24.272247 602969 system_pods.go:61] "csi-hostpath-resizer-0" [14cdb963-4eb9-4472-8a01-549e09a55047] Running
I1213 19:21:24.272255 602969 system_pods.go:61] "csi-hostpathplugin-l2fk7" [30df306a-dc88-4eb0-aa19-d35529eda401] Running
I1213 19:21:24.272260 602969 system_pods.go:61] "etcd-addons-248098" [014814e1-1087-4331-aeb4-7fd59c3165e5] Running
I1213 19:21:24.272264 602969 system_pods.go:61] "kindnet-n9pvh" [7e6398f0-53e1-4774-bdd6-211a800d8291] Running
I1213 19:21:24.272268 602969 system_pods.go:61] "kube-apiserver-addons-248098" [a3e569f6-6078-4dc0-a3b2-764a0180614c] Running
I1213 19:21:24.272273 602969 system_pods.go:61] "kube-controller-manager-addons-248098" [b6473627-2b96-431a-9082-99576908ad11] Running
I1213 19:21:24.272284 602969 system_pods.go:61] "kube-ingress-dns-minikube" [53321af4-b841-467d-af38-89b82188ff1d] Running
I1213 19:21:24.272289 602969 system_pods.go:61] "kube-proxy-rcbrb" [fb396ab8-720d-41c3-9d2b-d1b2fb666b0b] Running
I1213 19:21:24.272296 602969 system_pods.go:61] "kube-scheduler-addons-248098" [ac75ce0f-098a-4f6d-9e98-697f3b89e854] Running
I1213 19:21:24.272300 602969 system_pods.go:61] "metrics-server-84c5f94fbc-g7jcr" [a41f7493-f390-4111-9ecf-6b9c91d88986] Running
I1213 19:21:24.272305 602969 system_pods.go:61] "nvidia-device-plugin-daemonset-xsrsn" [bfc935e3-d013-494e-8380-5b4be1f7a0c9] Running
I1213 19:21:24.272312 602969 system_pods.go:61] "registry-5cc95cd69-5n4c9" [7ec0f719-ff86-4cc0-9868-18a171b8d618] Running
I1213 19:21:24.272316 602969 system_pods.go:61] "registry-proxy-nvc8d" [c14eabdb-94a1-4ed0-8a97-51210e96f13a] Running
I1213 19:21:24.272321 602969 system_pods.go:61] "snapshot-controller-56fcc65765-ltsx9" [7191195a-2231-4fe5-9bf3-ba875b3ceeb5] Running
I1213 19:21:24.272335 602969 system_pods.go:61] "snapshot-controller-56fcc65765-sqhl4" [a11c4e23-9e52-4164-b6f3-f29f74154fab] Running
I1213 19:21:24.272339 602969 system_pods.go:61] "storage-provisioner" [1d273a3f-36bb-4847-ad88-3544cda8cde5] Running
I1213 19:21:24.272344 602969 system_pods.go:74] duration metric: took 11.260676188s to wait for pod list to return data ...
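[annotation] The same kube-system pod inventory can be read back directly with kubectl; an equivalent command (assumed, not part of the test harness):
$ kubectl --context addons-248098 get pods -n kube-system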
I1213 19:21:24.272356 602969 default_sa.go:34] waiting for default service account to be created ...
I1213 19:21:24.275135 602969 default_sa.go:45] found service account: "default"
I1213 19:21:24.275162 602969 default_sa.go:55] duration metric: took 2.799619ms for default service account to be created ...
I1213 19:21:24.275172 602969 system_pods.go:116] waiting for k8s-apps to be running ...
I1213 19:21:24.286490 602969 system_pods.go:86] 18 kube-system pods found
I1213 19:21:24.286530 602969 system_pods.go:89] "coredns-7c65d6cfc9-bt6ls" [23b8e6b9-51eb-4a14-bee8-7eacdb154832] Running
I1213 19:21:24.286539 602969 system_pods.go:89] "csi-hostpath-attacher-0" [98592c8c-f15c-40c5-831b-2239874143ea] Running
I1213 19:21:24.286544 602969 system_pods.go:89] "csi-hostpath-resizer-0" [14cdb963-4eb9-4472-8a01-549e09a55047] Running
I1213 19:21:24.286550 602969 system_pods.go:89] "csi-hostpathplugin-l2fk7" [30df306a-dc88-4eb0-aa19-d35529eda401] Running
I1213 19:21:24.286555 602969 system_pods.go:89] "etcd-addons-248098" [014814e1-1087-4331-aeb4-7fd59c3165e5] Running
I1213 19:21:24.286560 602969 system_pods.go:89] "kindnet-n9pvh" [7e6398f0-53e1-4774-bdd6-211a800d8291] Running
I1213 19:21:24.286565 602969 system_pods.go:89] "kube-apiserver-addons-248098" [a3e569f6-6078-4dc0-a3b2-764a0180614c] Running
I1213 19:21:24.286570 602969 system_pods.go:89] "kube-controller-manager-addons-248098" [b6473627-2b96-431a-9082-99576908ad11] Running
I1213 19:21:24.286574 602969 system_pods.go:89] "kube-ingress-dns-minikube" [53321af4-b841-467d-af38-89b82188ff1d] Running
I1213 19:21:24.286579 602969 system_pods.go:89] "kube-proxy-rcbrb" [fb396ab8-720d-41c3-9d2b-d1b2fb666b0b] Running
I1213 19:21:24.286583 602969 system_pods.go:89] "kube-scheduler-addons-248098" [ac75ce0f-098a-4f6d-9e98-697f3b89e854] Running
I1213 19:21:24.286588 602969 system_pods.go:89] "metrics-server-84c5f94fbc-g7jcr" [a41f7493-f390-4111-9ecf-6b9c91d88986] Running
I1213 19:21:24.286591 602969 system_pods.go:89] "nvidia-device-plugin-daemonset-xsrsn" [bfc935e3-d013-494e-8380-5b4be1f7a0c9] Running
I1213 19:21:24.286595 602969 system_pods.go:89] "registry-5cc95cd69-5n4c9" [7ec0f719-ff86-4cc0-9868-18a171b8d618] Running
I1213 19:21:24.286599 602969 system_pods.go:89] "registry-proxy-nvc8d" [c14eabdb-94a1-4ed0-8a97-51210e96f13a] Running
I1213 19:21:24.286603 602969 system_pods.go:89] "snapshot-controller-56fcc65765-ltsx9" [7191195a-2231-4fe5-9bf3-ba875b3ceeb5] Running
I1213 19:21:24.286607 602969 system_pods.go:89] "snapshot-controller-56fcc65765-sqhl4" [a11c4e23-9e52-4164-b6f3-f29f74154fab] Running
I1213 19:21:24.286611 602969 system_pods.go:89] "storage-provisioner" [1d273a3f-36bb-4847-ad88-3544cda8cde5] Running
I1213 19:21:24.286618 602969 system_pods.go:126] duration metric: took 11.440315ms to wait for k8s-apps to be running ...
I1213 19:21:24.286645 602969 system_svc.go:44] waiting for kubelet service to be running ....
I1213 19:21:24.286737 602969 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1213 19:21:24.299683 602969 system_svc.go:56] duration metric: took 13.040573ms WaitForService to wait for kubelet
I1213 19:21:24.299710 602969 kubeadm.go:582] duration metric: took 2m3.069273573s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
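[annotation] The kubelet-service wait above maps to a plain systemd probe inside the node; a sketch assuming SSH access through minikube (output "active" expected for a running unit):
$ minikube -p addons-248098 ssh -- sudo systemctl is-active kubelet
active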
I1213 19:21:24.299729 602969 node_conditions.go:102] verifying NodePressure condition ...
I1213 19:21:24.304220 602969 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1213 19:21:24.304264 602969 node_conditions.go:123] node cpu capacity is 2
I1213 19:21:24.304277 602969 node_conditions.go:105] duration metric: took 4.542452ms to run NodePressure ...
I1213 19:21:24.304291 602969 start.go:241] waiting for startup goroutines ...
I1213 19:21:24.304299 602969 start.go:246] waiting for cluster config update ...
I1213 19:21:24.304316 602969 start.go:255] writing updated cluster config ...
I1213 19:21:24.304631 602969 ssh_runner.go:195] Run: rm -f paused
I1213 19:21:24.730318 602969 start.go:600] kubectl: 1.32.0, cluster: 1.31.2 (minor skew: 1)
I1213 19:21:24.735680 602969 out.go:177] * Done! kubectl is now configured to use "addons-248098" cluster and "default" namespace by default
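[annotation] The "minor skew: 1" note reflects the one-minor-version gap between the kubectl client (1.32.0) and the cluster (1.31.2), which kubectl's documented version-skew policy permits; it can be confirmed against the same context with:
$ kubectl --context addons-248098 version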
==> CRI-O <==
Dec 13 19:24:16 addons-248098 crio[989]: time="2024-12-13 19:24:16.954503584Z" level=info msg="Removed pod sandbox: 2db93e8ad44fd6457c70a23a15a6c08dd4000c0f040f289884c6e7bc897ccf63" id=701a29ca-b195-4229-b64f-b49e8feb985d name=/runtime.v1.RuntimeService/RemovePodSandbox
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.149452955Z" level=info msg="Running pod sandbox: default/hello-world-app-55bf9c44b4-z9wlr/POD" id=0b80c7fb-507a-4fd4-999d-b473e2e18082 name=/runtime.v1.RuntimeService/RunPodSandbox
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.149518015Z" level=warning msg="Allowed annotations are specified for workload []"
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.208541999Z" level=info msg="Got pod network &{Name:hello-world-app-55bf9c44b4-z9wlr Namespace:default ID:0fc8cd343fdb5dd786eec1281025c81b44fbc34607773c0b4fc784bfbc42df2e UID:14af58da-f64c-47d5-98c4-0b019b2ce7f2 NetNS:/var/run/netns/74b4da3b-468e-4828-a1b4-fef8416cb004 Networks:[] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.208586447Z" level=info msg="Adding pod default_hello-world-app-55bf9c44b4-z9wlr to CNI network \"kindnet\" (type=ptp)"
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.230085173Z" level=info msg="Got pod network &{Name:hello-world-app-55bf9c44b4-z9wlr Namespace:default ID:0fc8cd343fdb5dd786eec1281025c81b44fbc34607773c0b4fc784bfbc42df2e UID:14af58da-f64c-47d5-98c4-0b019b2ce7f2 NetNS:/var/run/netns/74b4da3b-468e-4828-a1b4-fef8416cb004 Networks:[] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.230358587Z" level=info msg="Checking pod default_hello-world-app-55bf9c44b4-z9wlr for CNI network kindnet (type=ptp)"
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.234102175Z" level=info msg="Ran pod sandbox 0fc8cd343fdb5dd786eec1281025c81b44fbc34607773c0b4fc784bfbc42df2e with infra container: default/hello-world-app-55bf9c44b4-z9wlr/POD" id=0b80c7fb-507a-4fd4-999d-b473e2e18082 name=/runtime.v1.RuntimeService/RunPodSandbox
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.235449934Z" level=info msg="Checking image status: docker.io/kicbase/echo-server:1.0" id=789157a9-c175-4f7d-b34a-7c87dcb6b152 name=/runtime.v1.ImageService/ImageStatus
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.235685104Z" level=info msg="Image docker.io/kicbase/echo-server:1.0 not found" id=789157a9-c175-4f7d-b34a-7c87dcb6b152 name=/runtime.v1.ImageService/ImageStatus
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.237957190Z" level=info msg="Pulling image: docker.io/kicbase/echo-server:1.0" id=ffa1ff09-45d1-4bf9-bb17-c23e415e8251 name=/runtime.v1.ImageService/PullImage
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.240456299Z" level=info msg="Trying to access \"docker.io/kicbase/echo-server:1.0\""
Dec 13 19:25:50 addons-248098 crio[989]: time="2024-12-13 19:25:50.523770140Z" level=info msg="Trying to access \"docker.io/kicbase/echo-server:1.0\""
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.310529399Z" level=info msg="Pulled image: docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6" id=ffa1ff09-45d1-4bf9-bb17-c23e415e8251 name=/runtime.v1.ImageService/PullImage
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.311586963Z" level=info msg="Checking image status: docker.io/kicbase/echo-server:1.0" id=80380c2c-8b1f-4fb6-9ebc-b17961ad4379 name=/runtime.v1.ImageService/ImageStatus
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.312299308Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17,RepoTags:[docker.io/kicbase/echo-server:1.0],RepoDigests:[docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 docker.io/kicbase/echo-server@sha256:42a89d9b22e5307cb88494990d5d929c401339f508c0a7e98a4d8ac52623fc5b],Size_:4789170,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=80380c2c-8b1f-4fb6-9ebc-b17961ad4379 name=/runtime.v1.ImageService/ImageStatus
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.313428964Z" level=info msg="Checking image status: docker.io/kicbase/echo-server:1.0" id=26d24bab-1ee3-414a-a310-38a0978faf00 name=/runtime.v1.ImageService/ImageStatus
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.314137124Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17,RepoTags:[docker.io/kicbase/echo-server:1.0],RepoDigests:[docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 docker.io/kicbase/echo-server@sha256:42a89d9b22e5307cb88494990d5d929c401339f508c0a7e98a4d8ac52623fc5b],Size_:4789170,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=26d24bab-1ee3-414a-a310-38a0978faf00 name=/runtime.v1.ImageService/ImageStatus
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.315582895Z" level=info msg="Creating container: default/hello-world-app-55bf9c44b4-z9wlr/hello-world-app" id=dae04327-0b78-4e44-8900-0bce8551e926 name=/runtime.v1.RuntimeService/CreateContainer
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.315678190Z" level=warning msg="Allowed annotations are specified for workload []"
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.349556749Z" level=warning msg="Failed to open /etc/passwd: open /var/lib/containers/storage/overlay/7b5415019a9546f4dc15ddba13245a5d40d0cf415a3aaa00c98e313f9b292b14/merged/etc/passwd: no such file or directory"
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.349762199Z" level=warning msg="Failed to open /etc/group: open /var/lib/containers/storage/overlay/7b5415019a9546f4dc15ddba13245a5d40d0cf415a3aaa00c98e313f9b292b14/merged/etc/group: no such file or directory"
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.400996448Z" level=info msg="Created container 9d9515c6509ec7bb23852d6aa7f02ef4d2f61cabd27c4f4fc716428b7f281145: default/hello-world-app-55bf9c44b4-z9wlr/hello-world-app" id=dae04327-0b78-4e44-8900-0bce8551e926 name=/runtime.v1.RuntimeService/CreateContainer
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.401533505Z" level=info msg="Starting container: 9d9515c6509ec7bb23852d6aa7f02ef4d2f61cabd27c4f4fc716428b7f281145" id=40c00b08-8282-4650-a059-51b3fdf97b02 name=/runtime.v1.RuntimeService/StartContainer
Dec 13 19:25:51 addons-248098 crio[989]: time="2024-12-13 19:25:51.408634051Z" level=info msg="Started container" PID=9370 containerID=9d9515c6509ec7bb23852d6aa7f02ef4d2f61cabd27c4f4fc716428b7f281145 description=default/hello-world-app-55bf9c44b4-z9wlr/hello-world-app id=40c00b08-8282-4650-a059-51b3fdf97b02 name=/runtime.v1.RuntimeService/StartContainer sandboxID=0fc8cd343fdb5dd786eec1281025c81b44fbc34607773c0b4fc784bfbc42df2e
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
9d9515c6509ec docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 Less than a second ago Running hello-world-app 0 0fc8cd343fdb5 hello-world-app-55bf9c44b4-z9wlr
b7d7a44eec17b docker.io/library/nginx@sha256:41523187cf7d7a2f2677a80609d9caa14388bf5c1fbca9c410ba3de602aaaab4 2 minutes ago Running nginx 0 168334f3be3f6 nginx
0ee8eaa9b3f42 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 4 minutes ago Running busybox 0 0ae8365e4a516 busybox
bda0caff014ee registry.k8s.io/ingress-nginx/controller@sha256:787a5408fa511266888b2e765f9666bee67d9bf2518a6b7cfd4ab6cc01c22eee 5 minutes ago Running controller 0 0ec565a7d84e5 ingress-nginx-controller-5f85ff4588-77ds6
90583dbe72d4a gcr.io/k8s-minikube/minikube-ingress-dns@sha256:4211a1de532376c881851542238121b26792225faa36a7b02dccad88fd05797c 5 minutes ago Running minikube-ingress-dns 0 657f4440d1fda kube-ingress-dns-minikube
1503028b745d0 docker.io/rancher/local-path-provisioner@sha256:689a2489a24e74426e4a4666e611c988202c5fa995908b0c60133aca3eb87d98 5 minutes ago Running local-path-provisioner 0 9063ee2cd8175 local-path-provisioner-86d989889c-rgd6q
e10ba2c21305f d54655ed3a8543a162b688a24bf969ee1a28d906b8ccb30188059247efdae234 5 minutes ago Exited patch 2 13679a5079d48 ingress-nginx-admission-patch-7r99g
999049ad75afc registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:0550b75a965592f1dde3fbeaa98f67a1e10c5a086bcd69a29054cc4edcb56771 6 minutes ago Exited create 0 0ad06363f4b95 ingress-nginx-admission-create-2fpd2
eb0c779bf9b1d registry.k8s.io/metrics-server/metrics-server@sha256:048bcf48fc2cce517a61777e22bac782ba59ea5e9b9a54bcb42dbee99566a91f 6 minutes ago Running metrics-server 0 25e7603213900 metrics-server-84c5f94fbc-g7jcr
d5719b1b478de 2f6c962e7b8311337352d9fdea917da2184d9919f4da7695bc2a6517cf392fe4 6 minutes ago Running coredns 0 5c0b264fe641c coredns-7c65d6cfc9-bt6ls
0c0704d382a69 ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6 6 minutes ago Running storage-provisioner 0 d7807360953a9 storage-provisioner
da25e26a83aad docker.io/kindest/kindnetd@sha256:de216f6245e142905c8022d424959a65f798fcd26f5b7492b9c0b0391d735c3e 6 minutes ago Running kindnet-cni 0 96f405480c5da kindnet-n9pvh
1449f483df90f 021d2420133054f8835987db659750ff639ab6863776460264dd8025c06644ba 6 minutes ago Running kube-proxy 0 9de7aa20493ea kube-proxy-rcbrb
27ee00545a23c f9c26480f1e722a7d05d7f1bb339180b19f941b23bcc928208e362df04a61270 6 minutes ago Running kube-apiserver 0 7082116ed71bc kube-apiserver-addons-248098
289abb226f700 27e3830e1402783674d8b594038967deea9d51f0d91b34c93c8f39d2f68af7da 6 minutes ago Running etcd 0 7412a2a5bc972 etcd-addons-248098
833e3ba74cac9 d6b061e73ae454743cbfe0e3479aa23e4ed65c61d38b4408a1e7f3d3859dda8a 6 minutes ago Running kube-scheduler 0 249b5349b7b11 kube-scheduler-addons-248098
4283a1804a94c 9404aea098d9e80cb648d86c07d56130a1fe875ed7c2526251c2ae68a9bf07ba 6 minutes ago Running kube-controller-manager 0 680c82ba028a7 kube-controller-manager-addons-248098
==> coredns [d5719b1b478dec4dc55817883d4f9577dc475cde036d3e161334d371c1e81411] <==
[INFO] 10.244.0.6:47673 - 45497 "A IN registry.kube-system.svc.cluster.local.us-east-2.compute.internal. udp 94 false 1232" NXDOMAIN qr,rd,ra 83 0.002309471s
[INFO] 10.244.0.6:47673 - 31481 "AAAA IN registry.kube-system.svc.cluster.local. udp 67 false 1232" NOERROR qr,aa,rd 149 0.000141221s
[INFO] 10.244.0.6:47673 - 25493 "A IN registry.kube-system.svc.cluster.local. udp 67 false 1232" NOERROR qr,aa,rd 110 0.000103812s
[INFO] 10.244.0.6:57376 - 41212 "AAAA IN registry.kube-system.svc.cluster.local.kube-system.svc.cluster.local. udp 86 false 512" NXDOMAIN qr,aa,rd 179 0.000111632s
[INFO] 10.244.0.6:57376 - 40727 "A IN registry.kube-system.svc.cluster.local.kube-system.svc.cluster.local. udp 86 false 512" NXDOMAIN qr,aa,rd 179 0.0000934s
[INFO] 10.244.0.6:58629 - 7071 "AAAA IN registry.kube-system.svc.cluster.local.svc.cluster.local. udp 74 false 512" NXDOMAIN qr,aa,rd 167 0.000069327s
[INFO] 10.244.0.6:58629 - 6858 "A IN registry.kube-system.svc.cluster.local.svc.cluster.local. udp 74 false 512" NXDOMAIN qr,aa,rd 167 0.000047821s
[INFO] 10.244.0.6:33103 - 15392 "AAAA IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000062811s
[INFO] 10.244.0.6:33103 - 15212 "A IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000053696s
[INFO] 10.244.0.6:49297 - 22745 "A IN registry.kube-system.svc.cluster.local.us-east-2.compute.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.001473972s
[INFO] 10.244.0.6:49297 - 22967 "AAAA IN registry.kube-system.svc.cluster.local.us-east-2.compute.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.001791359s
[INFO] 10.244.0.6:59233 - 22957 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000117097s
[INFO] 10.244.0.6:59233 - 23113 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.00022118s
[INFO] 10.244.0.21:38003 - 53913 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000286453s
[INFO] 10.244.0.21:33082 - 31154 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000283073s
[INFO] 10.244.0.21:32993 - 56738 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.00015041s
[INFO] 10.244.0.21:52200 - 51436 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000643726s
[INFO] 10.244.0.21:49264 - 4377 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000156573s
[INFO] 10.244.0.21:37568 - 38618 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000124761s
[INFO] 10.244.0.21:43774 - 39369 "AAAA IN storage.googleapis.com.us-east-2.compute.internal. udp 78 false 1232" NXDOMAIN qr,rd,ra 67 0.002822955s
[INFO] 10.244.0.21:58530 - 50634 "A IN storage.googleapis.com.us-east-2.compute.internal. udp 78 false 1232" NXDOMAIN qr,rd,ra 67 0.003320298s
[INFO] 10.244.0.21:58104 - 29741 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 648 0.008294313s
[INFO] 10.244.0.21:47436 - 18000 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.008930908s
[INFO] 10.244.0.24:44861 - 2 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000260664s
[INFO] 10.244.0.24:47366 - 3 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000168904s
==> describe nodes <==
Name: addons-248098
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-248098
kubernetes.io/os=linux
minikube.k8s.io/commit=68ea3eca706f73191794a96e3518c1d004192956
minikube.k8s.io/name=addons-248098
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_12_13T19_19_17_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-248098
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 13 Dec 2024 19:19:13 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-248098
AcquireTime: <unset>
RenewTime: Fri, 13 Dec 2024 19:25:44 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
  ----             ------  -----------------                 ------------------                ------                      -------
  MemoryPressure   False   Fri, 13 Dec 2024 19:23:52 +0000   Fri, 13 Dec 2024 19:19:10 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Fri, 13 Dec 2024 19:23:52 +0000   Fri, 13 Dec 2024 19:19:10 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Fri, 13 Dec 2024 19:23:52 +0000   Fri, 13 Dec 2024 19:19:10 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Fri, 13 Dec 2024 19:23:52 +0000   Fri, 13 Dec 2024 19:19:39 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-248098
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 0af0368374054463b8b1bd628ee8eb22
System UUID: dce25a95-cc3d-451b-b59c-5c92da6108a0
Boot ID: 8bc558cc-8777-4865-b401-e730957079d4
Kernel Version: 5.15.0-1072-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: cri-o://1.24.6
Kubelet Version: v1.31.2
Kube-Proxy Version: v1.31.2
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (15 in total)
  Namespace           Name                                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------           ----                                       ------------  ----------  ---------------  -------------  ---
  default             busybox                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m26s
  default             hello-world-app-55bf9c44b4-z9wlr           0 (0%)        0 (0%)      0 (0%)           0 (0%)         2s
  default             nginx                                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m21s
  ingress-nginx       ingress-nginx-controller-5f85ff4588-77ds6  100m (5%)     0 (0%)      90Mi (1%)        0 (0%)         6m24s
  kube-system         coredns-7c65d6cfc9-bt6ls                   100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     6m28s
  kube-system         etcd-addons-248098                         100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         6m35s
  kube-system         kindnet-n9pvh                              100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      6m29s
  kube-system         kube-apiserver-addons-248098               250m (12%)    0 (0%)      0 (0%)           0 (0%)         6m35s
  kube-system         kube-controller-manager-addons-248098      200m (10%)    0 (0%)      0 (0%)           0 (0%)         6m35s
  kube-system         kube-ingress-dns-minikube                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m25s
  kube-system         kube-proxy-rcbrb                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m28s
  kube-system         kube-scheduler-addons-248098               100m (5%)     0 (0%)      0 (0%)           0 (0%)         6m35s
  kube-system         metrics-server-84c5f94fbc-g7jcr            100m (5%)     0 (0%)      200Mi (2%)       0 (0%)         6m25s
  kube-system         storage-provisioner                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m25s
  local-path-storage  local-path-provisioner-86d989889c-rgd6q    0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m25s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests     Limits
  --------           --------     ------
  cpu                1050m (52%)  100m (5%)
  memory             510Mi (6%)   220Mi (2%)
  ephemeral-storage  0 (0%)       0 (0%)
  hugepages-1Gi      0 (0%)       0 (0%)
  hugepages-2Mi      0 (0%)       0 (0%)
  hugepages-32Mi     0 (0%)       0 (0%)
  hugepages-64Ki     0 (0%)       0 (0%)
Events:
  Type     Reason                   Age                    From             Message
  ----     ------                   ----                   ----             -------
  Normal   Starting                 6m23s                  kube-proxy
  Normal   Starting                 6m35s                  kubelet          Starting kubelet.
  Warning  CgroupV1                 6m35s                  kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
  Normal   NodeHasSufficientMemory  6m35s (x2 over 6m35s)  kubelet          Node addons-248098 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    6m35s (x2 over 6m35s)  kubelet          Node addons-248098 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     6m35s (x2 over 6m35s)  kubelet          Node addons-248098 status is now: NodeHasSufficientPID
  Normal   RegisteredNode           6m31s                  node-controller  Node addons-248098 event: Registered Node addons-248098 in Controller
  Normal   NodeReady                6m12s                  kubelet          Node addons-248098 status is now: NodeReady
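Reading aid for the Allocated resources block above: percentages are taken against the node's allocatable values, so 1050m of CPU requests on a 2-CPU (2000m) node is 1050/2000 ≈ 52%, and 510Mi of memory requests against 8022296Ki (≈7834Mi) allocatable is roughly 6%. The 52% requests versus 5% limits split also shows that CPU pressure on this node comes from requests, not limits.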
==> dmesg <==
==> etcd [289abb226f700e5f3c20a349d1479217ce3aa4bc311be8aa6d6f12374b0cb68a] <==
{"level":"info","ts":"2024-12-13T19:19:10.590717Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-12-13T19:19:10.591625Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-12-13T19:19:21.946468Z","caller":"traceutil/trace.go:171","msg":"trace[936921410] transaction","detail":"{read_only:false; response_revision:316; number_of_response:1; }","duration":"127.534977ms","start":"2024-12-13T19:19:21.818902Z","end":"2024-12-13T19:19:21.946437Z","steps":["trace[936921410] 'process raft request' (duration: 43.075222ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:19:22.355014Z","caller":"traceutil/trace.go:171","msg":"trace[1311423016] transaction","detail":"{read_only:false; response_revision:318; number_of_response:1; }","duration":"138.771034ms","start":"2024-12-13T19:19:22.216225Z","end":"2024-12-13T19:19:22.354996Z","steps":["trace[1311423016] 'process raft request' (duration: 138.610383ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:19:22.401520Z","caller":"traceutil/trace.go:171","msg":"trace[1286345005] transaction","detail":"{read_only:false; response_revision:319; number_of_response:1; }","duration":"185.164389ms","start":"2024-12-13T19:19:22.216333Z","end":"2024-12-13T19:19:22.401497Z","steps":["trace[1286345005] 'process raft request' (duration: 138.615208ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:19:22.417210Z","caller":"traceutil/trace.go:171","msg":"trace[822680764] linearizableReadLoop","detail":"{readStateIndex:326; appliedIndex:325; }","duration":"200.919964ms","start":"2024-12-13T19:19:22.216276Z","end":"2024-12-13T19:19:22.417196Z","steps":["trace[822680764] 'read index received' (duration: 103.548881ms)","trace[822680764] 'applied index is now lower than readState.Index' (duration: 97.370534ms)"],"step_count":2}
{"level":"warn","ts":"2024-12-13T19:19:22.417327Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"201.029357ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/replicaset-controller\" ","response":"range_response_count:1 size:207"}
{"level":"info","ts":"2024-12-13T19:19:22.502403Z","caller":"traceutil/trace.go:171","msg":"trace[322309030] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/replicaset-controller; range_end:; response_count:1; response_revision:320; }","duration":"286.109173ms","start":"2024-12-13T19:19:22.216272Z","end":"2024-12-13T19:19:22.502382Z","steps":["trace[322309030] 'agreement among raft nodes before linearized reading' (duration: 200.970115ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:19:22.417422Z","caller":"traceutil/trace.go:171","msg":"trace[1489321000] transaction","detail":"{read_only:false; response_revision:320; number_of_response:1; }","duration":"162.451632ms","start":"2024-12-13T19:19:22.254964Z","end":"2024-12-13T19:19:22.417415Z","steps":["trace[1489321000] 'process raft request' (duration: 162.137149ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:19:22.571107Z","caller":"traceutil/trace.go:171","msg":"trace[2079234850] transaction","detail":"{read_only:false; response_revision:321; number_of_response:1; }","duration":"250.729686ms","start":"2024-12-13T19:19:22.320359Z","end":"2024-12-13T19:19:22.571089Z","steps":["trace[2079234850] 'process raft request' (duration: 209.514181ms)","trace[2079234850] 'compare' (duration: 37.338718ms)"],"step_count":2}
{"level":"info","ts":"2024-12-13T19:19:22.579490Z","caller":"traceutil/trace.go:171","msg":"trace[1110984546] transaction","detail":"{read_only:false; response_revision:322; number_of_response:1; }","duration":"258.994085ms","start":"2024-12-13T19:19:22.320480Z","end":"2024-12-13T19:19:22.579474Z","steps":["trace[1110984546] 'process raft request' (duration: 250.342153ms)"],"step_count":1}
{"level":"warn","ts":"2024-12-13T19:19:22.628459Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"308.003647ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/configmaps/kube-system/coredns\" ","response":"range_response_count:1 size:612"}
{"level":"info","ts":"2024-12-13T19:19:22.649678Z","caller":"traceutil/trace.go:171","msg":"trace[627535555] range","detail":"{range_begin:/registry/configmaps/kube-system/coredns; range_end:; response_count:1; response_revision:322; }","duration":"329.22598ms","start":"2024-12-13T19:19:22.320426Z","end":"2024-12-13T19:19:22.649652Z","steps":["trace[627535555] 'agreement among raft nodes before linearized reading' (duration: 279.7766ms)","trace[627535555] 'range keys from bolt db' (duration: 25.603758ms)"],"step_count":2}
{"level":"warn","ts":"2024-12-13T19:19:22.649954Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-13T19:19:22.320406Z","time spent":"329.514526ms","remote":"127.0.0.1:37482","response type":"/etcdserverpb.KV/Range","request count":0,"request size":42,"response count":1,"response size":636,"request content":"key:\"/registry/configmaps/kube-system/coredns\" "}
{"level":"warn","ts":"2024-12-13T19:19:22.650627Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"330.164536ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-12-13T19:19:22.656078Z","caller":"traceutil/trace.go:171","msg":"trace[92943196] range","detail":"{range_begin:/registry/namespaces; range_end:; response_count:0; response_revision:322; }","duration":"335.61055ms","start":"2024-12-13T19:19:22.320447Z","end":"2024-12-13T19:19:22.656058Z","steps":["trace[92943196] 'agreement among raft nodes before linearized reading' (duration: 330.139543ms)"],"step_count":1}
{"level":"warn","ts":"2024-12-13T19:19:22.656260Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-12-13T19:19:22.320436Z","time spent":"335.796201ms","remote":"127.0.0.1:37498","response type":"/etcdserverpb.KV/Range","request count":0,"request size":24,"response count":0,"response size":29,"request content":"key:\"/registry/namespaces\" limit:1 "}
{"level":"info","ts":"2024-12-13T19:19:22.715634Z","caller":"traceutil/trace.go:171","msg":"trace[529049175] transaction","detail":"{read_only:false; response_revision:323; number_of_response:1; }","duration":"154.336798ms","start":"2024-12-13T19:19:22.561281Z","end":"2024-12-13T19:19:22.715618Z","steps":["trace[529049175] 'process raft request' (duration: 154.083543ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:20:55.571373Z","caller":"traceutil/trace.go:171","msg":"trace[806318250] transaction","detail":"{read_only:false; response_revision:1226; number_of_response:1; }","duration":"106.250175ms","start":"2024-12-13T19:20:55.465107Z","end":"2024-12-13T19:20:55.571357Z","steps":["trace[806318250] 'process raft request' (duration: 106.12251ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:20:55.571507Z","caller":"traceutil/trace.go:171","msg":"trace[1413756354] transaction","detail":"{read_only:false; response_revision:1227; number_of_response:1; }","duration":"106.292761ms","start":"2024-12-13T19:20:55.465208Z","end":"2024-12-13T19:20:55.571501Z","steps":["trace[1413756354] 'process raft request' (duration: 106.0575ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:20:55.571631Z","caller":"traceutil/trace.go:171","msg":"trace[984580659] transaction","detail":"{read_only:false; response_revision:1228; number_of_response:1; }","duration":"105.507881ms","start":"2024-12-13T19:20:55.466116Z","end":"2024-12-13T19:20:55.571624Z","steps":["trace[984580659] 'process raft request' (duration: 105.172968ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:20:55.571726Z","caller":"traceutil/trace.go:171","msg":"trace[1743161593] linearizableReadLoop","detail":"{readStateIndex:1262; appliedIndex:1257; }","duration":"102.474618ms","start":"2024-12-13T19:20:55.469245Z","end":"2024-12-13T19:20:55.571719Z","steps":["trace[1743161593] 'read index received' (duration: 15.995395ms)","trace[1743161593] 'applied index is now lower than readState.Index' (duration: 86.478632ms)"],"step_count":2}
{"level":"warn","ts":"2024-12-13T19:20:55.572400Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"103.138971ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/csi-hostpathplugin-l2fk7\" ","response":"range_response_count:1 size:12993"}
{"level":"info","ts":"2024-12-13T19:20:55.572439Z","caller":"traceutil/trace.go:171","msg":"trace[1715638475] range","detail":"{range_begin:/registry/pods/kube-system/csi-hostpathplugin-l2fk7; range_end:; response_count:1; response_revision:1229; }","duration":"103.18936ms","start":"2024-12-13T19:20:55.469241Z","end":"2024-12-13T19:20:55.572430Z","steps":["trace[1715638475] 'agreement among raft nodes before linearized reading' (duration: 102.574073ms)"],"step_count":1}
{"level":"info","ts":"2024-12-13T19:20:55.571644Z","caller":"traceutil/trace.go:171","msg":"trace[1022539298] transaction","detail":"{read_only:false; response_revision:1225; number_of_response:1; }","duration":"106.60575ms","start":"2024-12-13T19:20:55.465021Z","end":"2024-12-13T19:20:55.571627Z","steps":["trace[1022539298] 'process raft request' (duration: 52.365558ms)","trace[1022539298] 'compare' (duration: 53.748928ms)"],"step_count":2}
==> kernel <==
19:25:52 up 3:07, 0 users, load average: 0.27, 1.47, 2.44
Linux addons-248098 5.15.0-1072-aws #78~20.04.1-Ubuntu SMP Wed Oct 9 15:29:54 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [da25e26a83aad96484fe1eecafba3a8b5e62f5486ff06573a87eb752e248b7f3] <==
I1213 19:23:49.335955 1 main.go:301] handling current node
I1213 19:23:59.334937 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:23:59.334973 1 main.go:301] handling current node
I1213 19:24:09.335192 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:24:09.335337 1 main.go:301] handling current node
I1213 19:24:19.334956 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:24:19.334996 1 main.go:301] handling current node
I1213 19:24:29.335200 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:24:29.335314 1 main.go:301] handling current node
I1213 19:24:39.342442 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:24:39.342480 1 main.go:301] handling current node
I1213 19:24:49.343677 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:24:49.343722 1 main.go:301] handling current node
I1213 19:24:59.335245 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:24:59.335276 1 main.go:301] handling current node
I1213 19:25:09.339935 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:25:09.339972 1 main.go:301] handling current node
I1213 19:25:19.343530 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:25:19.343580 1 main.go:301] handling current node
I1213 19:25:29.335421 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:25:29.335459 1 main.go:301] handling current node
I1213 19:25:39.340798 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:25:39.340837 1 main.go:301] handling current node
I1213 19:25:49.335813 1 main.go:297] Handling node with IPs: map[192.168.49.2:{}]
I1213 19:25:49.335848 1 main.go:301] handling current node
==> kube-apiserver [27ee00545a23ca3d022b68468445151371316d97acbaf2235b93791b944d3e2d] <==
> logger="UnhandledError"
E1213 19:20:49.877444 1 remote_available_controller.go:448] "Unhandled Error" err="v1beta1.metrics.k8s.io failed with: failing or missing response from https://10.97.72.3:443/apis/metrics.k8s.io/v1beta1: Get \"https://10.97.72.3:443/apis/metrics.k8s.io/v1beta1\": dial tcp 10.97.72.3:443: connect: connection refused" logger="UnhandledError"
I1213 19:20:50.167714 1 handler.go:286] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
E1213 19:21:34.791748 1 conn.go:339] Error on socket receive: read tcp 192.168.49.2:8443->192.168.49.1:36812: use of closed network connection
E1213 19:21:35.211372 1 conn.go:339] Error on socket receive: read tcp 192.168.49.2:8443->192.168.49.1:36854: use of closed network connection
I1213 19:21:44.609678 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.104.201.169"}
I1213 19:22:47.019728 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I1213 19:23:10.541433 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1213 19:23:10.547768 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1213 19:23:10.579260 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1213 19:23:10.579633 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1213 19:23:10.594662 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1213 19:23:10.594705 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1213 19:23:10.604171 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1213 19:23:10.604219 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1213 19:23:10.822739 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1213 19:23:10.822775 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W1213 19:23:11.599147 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W1213 19:23:11.823336 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W1213 19:23:11.835647 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I1213 19:23:24.385887 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W1213 19:23:25.508597 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I1213 19:23:29.989652 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I1213 19:23:30.374495 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.97.136.243"}
I1213 19:25:50.130689 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.109.68.160"}
==> kube-controller-manager [4283a1804a94cc88954082e4f508a1cbf5f868d2c0662a9d3f0e826e9b6c5f1a] <==
W1213 19:24:04.938570 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:24:04.938615 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:24:19.053058 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:24:19.053103 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:24:26.780082 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:24:26.780128 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:24:30.599554 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:24:30.599676 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:24:38.856914 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:24:38.856957 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:25:03.136050 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:25:03.136094 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:25:15.756367 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:25:15.756414 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:25:21.780122 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:25:21.780253 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:25:28.782508 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:25:28.782549 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1213 19:25:36.223807 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1213 19:25:36.223851 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I1213 19:25:49.876915 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="63.23349ms"
I1213 19:25:49.888298 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="10.971363ms"
I1213 19:25:49.888753 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="64.222µs"
I1213 19:25:51.906869 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="12.483711ms"
I1213 19:25:51.906943 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="34.914µs"
==> kube-proxy [1449f483df90f5a531d350913e0aa7cdf914d18ed8dca152d252402554d37102] <==
I1213 19:19:26.856575 1 server_linux.go:66] "Using iptables proxy"
I1213 19:19:27.434663 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E1213 19:19:27.434728 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1213 19:19:28.459633 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1213 19:19:28.459776 1 server_linux.go:169] "Using iptables Proxier"
I1213 19:19:28.462676 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1213 19:19:28.463711 1 server.go:483] "Version info" version="v1.31.2"
I1213 19:19:28.463786 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1213 19:19:28.553230 1 config.go:199] "Starting service config controller"
I1213 19:19:28.553341 1 shared_informer.go:313] Waiting for caches to sync for service config
I1213 19:19:28.553931 1 config.go:105] "Starting endpoint slice config controller"
I1213 19:19:28.575024 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I1213 19:19:28.554133 1 config.go:328] "Starting node config controller"
I1213 19:19:28.575148 1 shared_informer.go:313] Waiting for caches to sync for node config
I1213 19:19:28.710663 1 shared_informer.go:320] Caches are synced for service config
I1213 19:19:28.710979 1 shared_informer.go:320] Caches are synced for node config
I1213 19:19:28.711013 1 shared_informer.go:320] Caches are synced for endpoint slice config
==> kube-scheduler [833e3ba74cac9f8b814ea06aeafe171150043188b329c9d621a03c75dbc4578f] <==
W1213 19:19:13.992373 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1213 19:19:13.994022 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1213 19:19:13.992722 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1213 19:19:13.994124 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1213 19:19:14.810363 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E1213 19:19:14.811859 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1213 19:19:14.830724 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1213 19:19:14.830897 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1213 19:19:14.876077 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1213 19:19:14.876126 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1213 19:19:14.882112 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1213 19:19:14.882231 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1213 19:19:14.988961 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E1213 19:19:14.989138 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W1213 19:19:14.991077 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1213 19:19:14.991192 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W1213 19:19:15.093515 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1213 19:19:15.093566 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1213 19:19:15.127159 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1213 19:19:15.127305 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1213 19:19:15.207229 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1213 19:19:15.207275 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1213 19:19:15.258698 1 reflector.go:561] runtime/asm_arm64.s:1222: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1213 19:19:15.258955 1 reflector.go:158] "Unhandled Error" err="runtime/asm_arm64.s:1222: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
I1213 19:19:17.658668 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Dec 13 19:24:06 addons-248098 kubelet[1527]: E1213 19:24:06.718238 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117846717930318,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:16 addons-248098 kubelet[1527]: E1213 19:24:16.585117 1527 container_manager_linux.go:513] "Failed to find cgroups of kubelet" err="cpu and memory cgroup hierarchy not unified. cpu: /docker/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776, memory: /docker/71118ff07ec6fa79104cf400f95c50c9ae227a1aad64456bb5c81d1d75958776/system.slice/kubelet.service"
Dec 13 19:24:16 addons-248098 kubelet[1527]: E1213 19:24:16.720992 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117856720670706,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:16 addons-248098 kubelet[1527]: E1213 19:24:16.721071 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117856720670706,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:26 addons-248098 kubelet[1527]: E1213 19:24:26.726872 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117866725441149,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:26 addons-248098 kubelet[1527]: E1213 19:24:26.726911 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117866725441149,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:36 addons-248098 kubelet[1527]: E1213 19:24:36.729337 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117876729082662,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:36 addons-248098 kubelet[1527]: E1213 19:24:36.729377 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117876729082662,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:46 addons-248098 kubelet[1527]: E1213 19:24:46.731674 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117886731412820,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:46 addons-248098 kubelet[1527]: E1213 19:24:46.731710 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117886731412820,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:56 addons-248098 kubelet[1527]: E1213 19:24:56.734459 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117896734174638,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:24:56 addons-248098 kubelet[1527]: E1213 19:24:56.734504 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117896734174638,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:06 addons-248098 kubelet[1527]: E1213 19:25:06.737519 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117906737255996,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:06 addons-248098 kubelet[1527]: E1213 19:25:06.737557 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117906737255996,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:14 addons-248098 kubelet[1527]: I1213 19:25:14.495042 1527 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/busybox" secret="" err="secret \"gcp-auth\" not found"
Dec 13 19:25:16 addons-248098 kubelet[1527]: E1213 19:25:16.740664 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117916740427183,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:16 addons-248098 kubelet[1527]: E1213 19:25:16.740699 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117916740427183,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:26 addons-248098 kubelet[1527]: E1213 19:25:26.744014 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117926743736063,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:26 addons-248098 kubelet[1527]: E1213 19:25:26.744061 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117926743736063,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:36 addons-248098 kubelet[1527]: E1213 19:25:36.746342 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117936746068138,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:36 addons-248098 kubelet[1527]: E1213 19:25:36.746383 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117936746068138,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:46 addons-248098 kubelet[1527]: E1213 19:25:46.749342 1527 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117946749077931,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:46 addons-248098 kubelet[1527]: E1213 19:25:46.749383 1527 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1734117946749077931,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:606284,},InodesUsed:&UInt64Value{Value:231,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Dec 13 19:25:49 addons-248098 kubelet[1527]: I1213 19:25:49.847654 1527 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/nginx" podStartSLOduration=137.206649815 podStartE2EDuration="2m19.847634724s" podCreationTimestamp="2024-12-13 19:23:30 +0000 UTC" firstStartedPulling="2024-12-13 19:23:30.641187923 +0000 UTC m=+254.266524798" lastFinishedPulling="2024-12-13 19:23:33.282172832 +0000 UTC m=+256.907509707" observedRunningTime="2024-12-13 19:23:33.61062549 +0000 UTC m=+257.235962357" watchObservedRunningTime="2024-12-13 19:25:49.847634724 +0000 UTC m=+393.472971590"
Dec 13 19:25:50 addons-248098 kubelet[1527]: I1213 19:25:49.994736 1527 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh8gv\" (UniqueName: \"kubernetes.io/projected/14af58da-f64c-47d5-98c4-0b019b2ce7f2-kube-api-access-xh8gv\") pod \"hello-world-app-55bf9c44b4-z9wlr\" (UID: \"14af58da-f64c-47d5-98c4-0b019b2ce7f2\") " pod="default/hello-world-app-55bf9c44b4-z9wlr"
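Note on the kubelet output above: the eviction_manager "missing image stats" / "failed to get HasDedicatedImageFs" errors recur roughly every 10s for the whole run and come from the ImageFsInfo response from cri-o carrying no ContainerFilesystems entry; they appear to be log noise unrelated to the ingress failure. If the raw CRI view needs checking, one possible command is the sketch below, assuming crictl is available on the node as it normally is in the minikube image:
out/minikube-linux-arm64 -p addons-248098 ssh "sudo crictl imagefsinfo"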
==> storage-provisioner [0c0704d382a69b93cc22a51e1e8cf786c5e6bb3b37718a2ca963a7aa91566d92] <==
I1213 19:19:40.443989 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1213 19:19:40.470077 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1213 19:19:40.470130 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1213 19:19:40.503801 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1213 19:19:40.507064 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-248098_ef556317-08dd-4573-8f53-d898928781c1!
I1213 19:19:40.511401 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"cbcfe82e-6948-4068-b720-61c573d1f4fc", APIVersion:"v1", ResourceVersion:"893", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-248098_ef556317-08dd-4573-8f53-d898928781c1 became leader
I1213 19:19:40.607521 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-248098_ef556317-08dd-4573-8f53-d898928781c1!
E1213 19:23:09.647789 1 controller.go:1050] claim "1495a858-fb44-41da-96f5-75a367db6d66" in work queue no longer exists
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-248098 -n addons-248098
helpers_test.go:261: (dbg) Run: kubectl --context addons-248098 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: ingress-nginx-admission-create-2fpd2 ingress-nginx-admission-patch-7r99g
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Ingress]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-248098 describe pod ingress-nginx-admission-create-2fpd2 ingress-nginx-admission-patch-7r99g
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context addons-248098 describe pod ingress-nginx-admission-create-2fpd2 ingress-nginx-admission-patch-7r99g: exit status 1 (83.770496ms)
** stderr **
Error from server (NotFound): pods "ingress-nginx-admission-create-2fpd2" not found
Error from server (NotFound): pods "ingress-nginx-admission-patch-7r99g" not found
** /stderr **
helpers_test.go:279: kubectl --context addons-248098 describe pod ingress-nginx-admission-create-2fpd2 ingress-nginx-admission-patch-7r99g: exit status 1
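The two admission pods matched the status.phase!=Running selector because they belong to the one-shot ingress-nginx admission-create/patch Jobs and had already completed; by the time of the describe they had evidently been garbage-collected, hence the NotFound errors. In a future run (before the addon is disabled) the owning Jobs are a more stable handle for inspection, e.g.:
kubectl --context addons-248098 get jobs,pods -n ingress-nginx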
addons_test.go:992: (dbg) Run: out/minikube-linux-arm64 -p addons-248098 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:992: (dbg) Done: out/minikube-linux-arm64 -p addons-248098 addons disable ingress-dns --alsologtostderr -v=1: (1.065423617s)
addons_test.go:992: (dbg) Run: out/minikube-linux-arm64 -p addons-248098 addons disable ingress --alsologtostderr -v=1
addons_test.go:992: (dbg) Done: out/minikube-linux-arm64 -p addons-248098 addons disable ingress --alsologtostderr -v=1: (8.259224055s)
--- FAIL: TestAddons/parallel/Ingress (152.84s)
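To iterate on this failure locally, one possible invocation is sketched below; it assumes the standard minikube repo layout where these tests live under test/integration, that the minikube binary has already been built, and that additional flags (for example the driver and start arguments used in CI) may be required:
go test -v -run 'TestAddons/parallel/Ingress' ./test/integration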