=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:207: (dbg) Run: kubectl --context addons-002422 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:232: (dbg) Run: kubectl --context addons-002422 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:245: (dbg) Run: kubectl --context addons-002422 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [c8e1b210-5413-4f21-96cb-5ccf9e2929b8] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [c8e1b210-5413-4f21-96cb-5ccf9e2929b8] Running
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 8.003955006s
I1014 13:43:45.379905 7544 kapi.go:150] Service nginx in namespace default found.
addons_test.go:262: (dbg) Run: out/minikube-linux-arm64 -p addons-002422 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:262: (dbg) Non-zero exit: out/minikube-linux-arm64 -p addons-002422 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m11.314787333s)
** stderr **
ssh: Process exited with status 28
** /stderr **
addons_test.go:278: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
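(The failing step can be retried by hand against the same profile; a minimal sketch, assuming the addons-002422 cluster is still running and the resource names from the log above are unchanged. SSH exit status 28 is curl's own "operation timed out" code passed through, so giving curl an explicit -m timeout produces a quicker, clearer failure than waiting on the outer SSH timeout:

    # confirm the ingress controller and the nginx Service created by the test are still up
    kubectl --context addons-002422 -n ingress-nginx get pods -l app.kubernetes.io/component=controller
    kubectl --context addons-002422 get svc nginx
    # re-run the request from inside the node with an explicit curl timeout
    out/minikube-linux-arm64 -p addons-002422 ssh "curl -s -m 30 -H 'Host: nginx.example.com' http://127.0.0.1/"
)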
addons_test.go:286: (dbg) Run: kubectl --context addons-002422 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:291: (dbg) Run: out/minikube-linux-arm64 -p addons-002422 ip
addons_test.go:297: (dbg) Run: nslookup hello-john.test 192.168.49.2
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-002422
helpers_test.go:235: (dbg) docker inspect addons-002422:
-- stdout --
[
{
"Id": "05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c",
"Created": "2024-10-14T13:39:26.040660176Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 8793,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-10-14T13:39:26.200141481Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:e5ca9b83e048da5ecbd9864892b13b9f06d661ec5eae41590141157c6fe62bf7",
"ResolvConfPath": "/var/lib/docker/containers/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c/hostname",
"HostsPath": "/var/lib/docker/containers/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c/hosts",
"LogPath": "/var/lib/docker/containers/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c-json.log",
"Name": "/addons-002422",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-002422:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-002422",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/4aa3658e12047d4aae80e56b1a737b93933e3445eef34b2f05f9ae1a1f27b38b-init/diff:/var/lib/docker/overlay2/0fbe7ab461eb9f9a72ecb1d2c088de9e51a70b12c6d6de37aeffa6e2c5634bdc/diff",
"MergedDir": "/var/lib/docker/overlay2/4aa3658e12047d4aae80e56b1a737b93933e3445eef34b2f05f9ae1a1f27b38b/merged",
"UpperDir": "/var/lib/docker/overlay2/4aa3658e12047d4aae80e56b1a737b93933e3445eef34b2f05f9ae1a1f27b38b/diff",
"WorkDir": "/var/lib/docker/overlay2/4aa3658e12047d4aae80e56b1a737b93933e3445eef34b2f05f9ae1a1f27b38b/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-002422",
"Source": "/var/lib/docker/volumes/addons-002422/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-002422",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-002422",
"name.minikube.sigs.k8s.io": "addons-002422",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "0aed6d17065638fabcf4af9629eb2706f94c1b790a82245b3b3aad651ea1da99",
"SandboxKey": "/var/run/docker/netns/0aed6d170656",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-002422": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "0ff409cb6a6d634b31679069de159a6c4d604dc8e7199db02844607a2ed8ceed",
"EndpointID": "6ecc69181af9927db04c9d672fff7ea2ed76c70627324bcf71e3d5589e3b0324",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-002422",
"05e13f44fa23"
]
}
}
}
}
]
-- /stdout --
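(Most of the inspect dump above is incidental; the fields this test actually depends on can be queried directly with Go templates. A minimal sketch, assuming the addons-002422 container still exists, using the same templates minikube itself runs in the cli_runner lines further down:

    # node IP on the addons-002422 network (expected 192.168.49.2)
    docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" addons-002422
    # host port published for SSH on 127.0.0.1
    docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' addons-002422
)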
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-002422 -n addons-002422
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p addons-002422 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-002422 logs -n 25: (1.563613616s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | --all | minikube | jenkins | v1.34.0 | 14 Oct 24 13:38 UTC | 14 Oct 24 13:38 UTC |
| delete | -p download-only-457703 | download-only-457703 | jenkins | v1.34.0 | 14 Oct 24 13:38 UTC | 14 Oct 24 13:38 UTC |
| start | -o=json --download-only | download-only-347934 | jenkins | v1.34.0 | 14 Oct 24 13:38 UTC | |
| | -p download-only-347934 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.31.1 | | | | | |
| | --container-runtime=crio | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | --all | minikube | jenkins | v1.34.0 | 14 Oct 24 13:38 UTC | 14 Oct 24 13:38 UTC |
| delete | -p download-only-347934 | download-only-347934 | jenkins | v1.34.0 | 14 Oct 24 13:38 UTC | 14 Oct 24 13:38 UTC |
| delete | -p download-only-457703 | download-only-457703 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | 14 Oct 24 13:39 UTC |
| delete | -p download-only-347934 | download-only-347934 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | 14 Oct 24 13:39 UTC |
| start | --download-only -p | download-docker-849591 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | |
| | download-docker-849591 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p download-docker-849591 | download-docker-849591 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | 14 Oct 24 13:39 UTC |
| start | --download-only -p | binary-mirror-893512 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | |
| | binary-mirror-893512 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:35277 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p binary-mirror-893512 | binary-mirror-893512 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | 14 Oct 24 13:39 UTC |
| addons | disable dashboard -p | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | |
| | addons-002422 | | | | | |
| addons | enable dashboard -p | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | |
| | addons-002422 | | | | | |
| start | -p addons-002422 --wait=true | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:39 UTC | 14 Oct 24 13:41 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --addons=amd-gpu-device-plugin | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| addons | addons-002422 addons disable | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:41 UTC | 14 Oct 24 13:41 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-002422 addons disable | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:42 UTC | 14 Oct 24 13:42 UTC |
| | gcp-auth --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | enable headlamp | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:42 UTC | 14 Oct 24 13:42 UTC |
| | -p addons-002422 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-002422 addons disable | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:42 UTC | 14 Oct 24 13:42 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ip | addons-002422 ip | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:42 UTC | 14 Oct 24 13:42 UTC |
| addons | addons-002422 addons disable | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:42 UTC | 14 Oct 24 13:42 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-002422 addons | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:43 UTC | 14 Oct 24 13:43 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-002422 addons | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:43 UTC | 14 Oct 24 13:43 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-002422 addons | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:43 UTC | 14 Oct 24 13:43 UTC |
| | disable inspektor-gadget | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-002422 ssh curl -s | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:43 UTC | |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-002422 ip | addons-002422 | jenkins | v1.34.0 | 14 Oct 24 13:45 UTC | 14 Oct 24 13:45 UTC |
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/10/14 13:39:01
Running on machine: ip-172-31-21-244
Binary: Built with gc go1.23.1 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I1014 13:39:01.519189 8300 out.go:345] Setting OutFile to fd 1 ...
I1014 13:39:01.519388 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1014 13:39:01.519399 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:39:01.519408 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1014 13:39:01.519689 8300 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19790-2228/.minikube/bin
I1014 13:39:01.520212 8300 out.go:352] Setting JSON to false
I1014 13:39:01.521042 8300 start.go:129] hostinfo: {"hostname":"ip-172-31-21-244","uptime":1292,"bootTime":1728911849,"procs":147,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1070-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"da8ac1fd-6236-412a-a346-95873c98230d"}
I1014 13:39:01.521114 8300 start.go:139] virtualization:
I1014 13:39:01.523571 8300 out.go:177] * [addons-002422] minikube v1.34.0 on Ubuntu 20.04 (arm64)
I1014 13:39:01.525747 8300 out.go:177] - MINIKUBE_LOCATION=19790
I1014 13:39:01.525781 8300 notify.go:220] Checking for updates...
I1014 13:39:01.529853 8300 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I1014 13:39:01.531546 8300 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19790-2228/kubeconfig
I1014 13:39:01.532842 8300 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19790-2228/.minikube
I1014 13:39:01.534232 8300 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I1014 13:39:01.535691 8300 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I1014 13:39:01.537232 8300 driver.go:394] Setting default libvirt URI to qemu:///system
I1014 13:39:01.564142 8300 docker.go:123] docker version: linux-27.3.1:Docker Engine - Community
I1014 13:39:01.564253 8300 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1014 13:39:01.620162 8300 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:true NGoroutines:44 SystemTime:2024-10-14 13:39:01.611082175 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1070-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:27.3.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: brid
ge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.7]] Warnings:<nil>}}
I1014 13:39:01.620265 8300 docker.go:318] overlay module found
I1014 13:39:01.621991 8300 out.go:177] * Using the docker driver based on user configuration
I1014 13:39:01.623225 8300 start.go:297] selected driver: docker
I1014 13:39:01.623240 8300 start.go:901] validating driver "docker" against <nil>
I1014 13:39:01.623253 8300 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I1014 13:39:01.623855 8300 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I1014 13:39:01.686515 8300 info.go:266] docker info: {ID:5FDH:SA5P:5GCT:NLAS:B73P:SGDQ:PBG5:UBVH:UZY3:RXGO:CI7S:WAIH Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:true NGoroutines:44 SystemTime:2024-10-14 13:39:01.677396922 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1070-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:a
arch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-21-244 Labels:[] ExperimentalBuild:false ServerVersion:27.3.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: brid
ge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.7]] Warnings:<nil>}}
I1014 13:39:01.686715 8300 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I1014 13:39:01.686954 8300 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1014 13:39:01.688844 8300 out.go:177] * Using Docker driver with root privileges
I1014 13:39:01.690119 8300 cni.go:84] Creating CNI manager for ""
I1014 13:39:01.690189 8300 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I1014 13:39:01.690213 8300 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I1014 13:39:01.690301 8300 start.go:340] cluster config:
{Name:addons-002422 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-002422 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime
:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSH
AgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1014 13:39:01.691786 8300 out.go:177] * Starting "addons-002422" primary control-plane node in "addons-002422" cluster
I1014 13:39:01.692864 8300 cache.go:121] Beginning downloading kic base image for docker with crio
I1014 13:39:01.694108 8300 out.go:177] * Pulling base image v0.0.45-1728382586-19774 ...
I1014 13:39:01.695781 8300 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime crio
I1014 13:39:01.695827 8300 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19790-2228/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-cri-o-overlay-arm64.tar.lz4
I1014 13:39:01.695838 8300 cache.go:56] Caching tarball of preloaded images
I1014 13:39:01.695840 8300 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec in local docker daemon
I1014 13:39:01.695914 8300 preload.go:172] Found /home/jenkins/minikube-integration/19790-2228/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-cri-o-overlay-arm64.tar.lz4 in cache, skipping download
I1014 13:39:01.695924 8300 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on crio
I1014 13:39:01.696276 8300 profile.go:143] Saving config to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/config.json ...
I1014 13:39:01.696300 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/config.json: {Name:mke32a7b3203164b7b45aacc3b9f08280e6d7f75 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:01.712115 8300 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec to local cache
I1014 13:39:01.712224 8300 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec in local cache directory
I1014 13:39:01.712242 8300 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec in local cache directory, skipping pull
I1014 13:39:01.712246 8300 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec exists in cache, skipping pull
I1014 13:39:01.712253 8300 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec as a tarball
I1014 13:39:01.712258 8300 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec from local cache
I1014 13:39:18.423690 8300 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec from cached tarball
I1014 13:39:18.423728 8300 cache.go:194] Successfully downloaded all kic artifacts
I1014 13:39:18.423768 8300 start.go:360] acquireMachinesLock for addons-002422: {Name:mkd84a4fa8b14773f3ba751e5d68c67ef06bd4f3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I1014 13:39:18.423889 8300 start.go:364] duration metric: took 99.971µs to acquireMachinesLock for "addons-002422"
I1014 13:39:18.423920 8300 start.go:93] Provisioning new machine with config: &{Name:addons-002422 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-002422 Namespace:default APIServerHAVIP: APIServerName:min
ikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQe
muFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:crio ControlPlane:true Worker:true}
I1014 13:39:18.424000 8300 start.go:125] createHost starting for "" (driver="docker")
I1014 13:39:18.426424 8300 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I1014 13:39:18.426686 8300 start.go:159] libmachine.API.Create for "addons-002422" (driver="docker")
I1014 13:39:18.426720 8300 client.go:168] LocalClient.Create starting
I1014 13:39:18.426812 8300 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca.pem
I1014 13:39:18.926000 8300 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/cert.pem
I1014 13:39:19.558813 8300 cli_runner.go:164] Run: docker network inspect addons-002422 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W1014 13:39:19.574302 8300 cli_runner.go:211] docker network inspect addons-002422 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I1014 13:39:19.574389 8300 network_create.go:284] running [docker network inspect addons-002422] to gather additional debugging logs...
I1014 13:39:19.574411 8300 cli_runner.go:164] Run: docker network inspect addons-002422
W1014 13:39:19.589486 8300 cli_runner.go:211] docker network inspect addons-002422 returned with exit code 1
I1014 13:39:19.589523 8300 network_create.go:287] error running [docker network inspect addons-002422]: docker network inspect addons-002422: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-002422 not found
I1014 13:39:19.589536 8300 network_create.go:289] output of [docker network inspect addons-002422]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-002422 not found
** /stderr **
I1014 13:39:19.589632 8300 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1014 13:39:19.605243 8300 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x400055c310}
I1014 13:39:19.605285 8300 network_create.go:124] attempt to create docker network addons-002422 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I1014 13:39:19.605337 8300 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-002422 addons-002422
I1014 13:39:19.672554 8300 network_create.go:108] docker network addons-002422 192.168.49.0/24 created
I1014 13:39:19.672581 8300 kic.go:121] calculated static IP "192.168.49.2" for the "addons-002422" container
I1014 13:39:19.672660 8300 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I1014 13:39:19.687481 8300 cli_runner.go:164] Run: docker volume create addons-002422 --label name.minikube.sigs.k8s.io=addons-002422 --label created_by.minikube.sigs.k8s.io=true
I1014 13:39:19.709849 8300 oci.go:103] Successfully created a docker volume addons-002422
I1014 13:39:19.709939 8300 cli_runner.go:164] Run: docker run --rm --name addons-002422-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-002422 --entrypoint /usr/bin/test -v addons-002422:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec -d /var/lib
I1014 13:39:21.906996 8300 cli_runner.go:217] Completed: docker run --rm --name addons-002422-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-002422 --entrypoint /usr/bin/test -v addons-002422:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec -d /var/lib: (2.197000817s)
I1014 13:39:21.907030 8300 oci.go:107] Successfully prepared a docker volume addons-002422
I1014 13:39:21.907049 8300 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime crio
I1014 13:39:21.907067 8300 kic.go:194] Starting extracting preloaded images to volume ...
I1014 13:39:21.907137 8300 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19790-2228/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-cri-o-overlay-arm64.tar.lz4:/preloaded.tar:ro -v addons-002422:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec -I lz4 -xf /preloaded.tar -C /extractDir
I1014 13:39:25.970946 8300 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19790-2228/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-cri-o-overlay-arm64.tar.lz4:/preloaded.tar:ro -v addons-002422:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec -I lz4 -xf /preloaded.tar -C /extractDir: (4.063768387s)
I1014 13:39:25.970975 8300 kic.go:203] duration metric: took 4.063905487s to extract preloaded images to volume ...
W1014 13:39:25.971118 8300 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I1014 13:39:25.971246 8300 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I1014 13:39:26.025710 8300 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-002422 --name addons-002422 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-002422 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-002422 --network addons-002422 --ip 192.168.49.2 --volume addons-002422:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec
I1014 13:39:26.381604 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Running}}
I1014 13:39:26.403583 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:26.426817 8300 cli_runner.go:164] Run: docker exec addons-002422 stat /var/lib/dpkg/alternatives/iptables
I1014 13:39:26.492116 8300 oci.go:144] the created container "addons-002422" has a running status.
I1014 13:39:26.492143 8300 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa...
I1014 13:39:27.159451 8300 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I1014 13:39:27.183362 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:27.205625 8300 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I1014 13:39:27.205645 8300 kic_runner.go:114] Args: [docker exec --privileged addons-002422 chown docker:docker /home/docker/.ssh/authorized_keys]
I1014 13:39:27.286588 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:27.318504 8300 machine.go:93] provisionDockerMachine start ...
I1014 13:39:27.318598 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:27.342014 8300 main.go:141] libmachine: Using SSH client type: native
I1014 13:39:27.342285 8300 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413e90] 0x4166d0 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I1014 13:39:27.342296 8300 main.go:141] libmachine: About to run SSH command:
hostname
I1014 13:39:27.476294 8300 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-002422
I1014 13:39:27.476315 8300 ubuntu.go:169] provisioning hostname "addons-002422"
I1014 13:39:27.476377 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:27.498515 8300 main.go:141] libmachine: Using SSH client type: native
I1014 13:39:27.498751 8300 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413e90] 0x4166d0 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I1014 13:39:27.498763 8300 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-002422 && echo "addons-002422" | sudo tee /etc/hostname
I1014 13:39:27.649621 8300 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-002422
I1014 13:39:27.649757 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:27.670449 8300 main.go:141] libmachine: Using SSH client type: native
I1014 13:39:27.670685 8300 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413e90] 0x4166d0 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I1014 13:39:27.670702 8300 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-002422' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-002422/g' /etc/hosts;
else
echo '127.0.1.1 addons-002422' | sudo tee -a /etc/hosts;
fi
fi
I1014 13:39:27.796523 8300 main.go:141] libmachine: SSH cmd err, output: <nil>:
I1014 13:39:27.796547 8300 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19790-2228/.minikube CaCertPath:/home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19790-2228/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19790-2228/.minikube}
I1014 13:39:27.796597 8300 ubuntu.go:177] setting up certificates
I1014 13:39:27.796609 8300 provision.go:84] configureAuth start
I1014 13:39:27.796680 8300 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-002422
I1014 13:39:27.813604 8300 provision.go:143] copyHostCerts
I1014 13:39:27.813686 8300 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19790-2228/.minikube/key.pem (1675 bytes)
I1014 13:39:27.813805 8300 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19790-2228/.minikube/ca.pem (1082 bytes)
I1014 13:39:27.813863 8300 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19790-2228/.minikube/cert.pem (1123 bytes)
I1014 13:39:27.813939 8300 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19790-2228/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca-key.pem org=jenkins.addons-002422 san=[127.0.0.1 192.168.49.2 addons-002422 localhost minikube]
I1014 13:39:28.604899 8300 provision.go:177] copyRemoteCerts
I1014 13:39:28.604976 8300 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I1014 13:39:28.605031 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:28.621097 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:28.713880 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I1014 13:39:28.737206 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I1014 13:39:28.760651 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I1014 13:39:28.783892 8300 provision.go:87] duration metric: took 987.268952ms to configureAuth
I1014 13:39:28.783928 8300 ubuntu.go:193] setting minikube options for container-runtime
I1014 13:39:28.784128 8300 config.go:182] Loaded profile config "addons-002422": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.1
I1014 13:39:28.784234 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:28.801092 8300 main.go:141] libmachine: Using SSH client type: native
I1014 13:39:28.801333 8300 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413e90] 0x4166d0 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I1014 13:39:28.801366 8300 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %s "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
I1014 13:39:29.021776 8300 main.go:141] libmachine: SSH cmd err, output: <nil>:
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
I1014 13:39:29.021839 8300 machine.go:96] duration metric: took 1.703315975s to provisionDockerMachine
I1014 13:39:29.021866 8300 client.go:171] duration metric: took 10.595136953s to LocalClient.Create
I1014 13:39:29.021891 8300 start.go:167] duration metric: took 10.595203636s to libmachine.API.Create "addons-002422"
I1014 13:39:29.021923 8300 start.go:293] postStartSetup for "addons-002422" (driver="docker")
I1014 13:39:29.021950 8300 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I1014 13:39:29.022059 8300 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I1014 13:39:29.022138 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:29.039955 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:29.138030 8300 ssh_runner.go:195] Run: cat /etc/os-release
I1014 13:39:29.141073 8300 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I1014 13:39:29.141105 8300 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I1014 13:39:29.141118 8300 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I1014 13:39:29.141125 8300 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I1014 13:39:29.141135 8300 filesync.go:126] Scanning /home/jenkins/minikube-integration/19790-2228/.minikube/addons for local assets ...
I1014 13:39:29.141205 8300 filesync.go:126] Scanning /home/jenkins/minikube-integration/19790-2228/.minikube/files for local assets ...
I1014 13:39:29.141241 8300 start.go:296] duration metric: took 119.286948ms for postStartSetup
I1014 13:39:29.141939 8300 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-002422
I1014 13:39:29.161897 8300 profile.go:143] Saving config to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/config.json ...
I1014 13:39:29.162248 8300 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I1014 13:39:29.162301 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:29.179425 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:29.273204 8300 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I1014 13:39:29.277388 8300 start.go:128] duration metric: took 10.853372344s to createHost
I1014 13:39:29.277420 8300 start.go:83] releasing machines lock for "addons-002422", held for 10.853516426s
I1014 13:39:29.277488 8300 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-002422
I1014 13:39:29.292658 8300 ssh_runner.go:195] Run: cat /version.json
I1014 13:39:29.292711 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:29.293039 8300 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I1014 13:39:29.293123 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:29.309343 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:29.318878 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:29.400173 8300 ssh_runner.go:195] Run: systemctl --version
I1014 13:39:29.535275 8300 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
I1014 13:39:29.682522 8300 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I1014 13:39:29.686453 8300 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1014 13:39:29.706862 8300 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
I1014 13:39:29.706974 8300 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I1014 13:39:29.734354 8300 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I1014 13:39:29.734375 8300 start.go:495] detecting cgroup driver to use...
I1014 13:39:29.734406 8300 detect.go:187] detected "cgroupfs" cgroup driver on host os
I1014 13:39:29.734454 8300 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I1014 13:39:29.749192 8300 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I1014 13:39:29.760184 8300 docker.go:217] disabling cri-docker service (if available) ...
I1014 13:39:29.760246 8300 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I1014 13:39:29.774112 8300 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I1014 13:39:29.788395 8300 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I1014 13:39:29.880801 8300 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I1014 13:39:29.972415 8300 docker.go:233] disabling docker service ...
I1014 13:39:29.972481 8300 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I1014 13:39:29.992061 8300 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I1014 13:39:30.011825 8300 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I1014 13:39:30.109186 8300 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I1014 13:39:30.209178 8300 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I1014 13:39:30.221080 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I1014 13:39:30.237424 8300 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.10" pause image...
I1014 13:39:30.237513 8300 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.10"|' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.247070 8300 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
I1014 13:39:30.247171 8300 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.256865 8300 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.266642 8300 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.277380 8300 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I1014 13:39:30.286952 8300 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *"net.ipv4.ip_unprivileged_port_start=.*"/d' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.297328 8300 ssh_runner.go:195] Run: sh -c "sudo grep -q "^ *default_sysctls" /etc/crio/crio.conf.d/02-crio.conf || sudo sed -i '/conmon_cgroup = .*/a default_sysctls = \[\n\]' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.313264 8300 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^default_sysctls *= *\[|&\n "net.ipv4.ip_unprivileged_port_start=0",|' /etc/crio/crio.conf.d/02-crio.conf"
I1014 13:39:30.323340 8300 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I1014 13:39:30.331728 8300 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I1014 13:39:30.331833 8300 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I1014 13:39:30.345487 8300 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I1014 13:39:30.354185 8300 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1014 13:39:30.443199 8300 ssh_runner.go:195] Run: sudo systemctl restart crio
I1014 13:39:30.559773 8300 start.go:542] Will wait 60s for socket path /var/run/crio/crio.sock
I1014 13:39:30.559901 8300 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
I1014 13:39:30.563352 8300 start.go:563] Will wait 60s for crictl version
I1014 13:39:30.563470 8300 ssh_runner.go:195] Run: which crictl
I1014 13:39:30.567069 8300 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I1014 13:39:30.608005 8300 start.go:579] Version: 0.1.0
RuntimeName: cri-o
RuntimeVersion: 1.24.6
RuntimeApiVersion: v1
I1014 13:39:30.608183 8300 ssh_runner.go:195] Run: crio --version
I1014 13:39:30.644625 8300 ssh_runner.go:195] Run: crio --version
I1014 13:39:30.685006 8300 out.go:177] * Preparing Kubernetes v1.31.1 on CRI-O 1.24.6 ...
I1014 13:39:30.686361 8300 cli_runner.go:164] Run: docker network inspect addons-002422 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I1014 13:39:30.703035 8300 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I1014 13:39:30.706678 8300 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I1014 13:39:30.717505 8300 kubeadm.go:883] updating cluster {Name:addons-002422 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-002422 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I1014 13:39:30.717623 8300 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime crio
I1014 13:39:30.717682 8300 ssh_runner.go:195] Run: sudo crictl images --output json
I1014 13:39:30.792087 8300 crio.go:514] all images are preloaded for cri-o runtime.
I1014 13:39:30.792116 8300 crio.go:433] Images already preloaded, skipping extraction
I1014 13:39:30.792174 8300 ssh_runner.go:195] Run: sudo crictl images --output json
I1014 13:39:30.827792 8300 crio.go:514] all images are preloaded for cri-o runtime.
I1014 13:39:30.827816 8300 cache_images.go:84] Images are preloaded, skipping loading
I1014 13:39:30.827824 8300 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 crio true true} ...
I1014 13:39:30.827955 8300 kubeadm.go:946] kubelet [Unit]
Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --enforce-node-allocatable= --hostname-override=addons-002422 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-002422 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
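The kubelet unit fragment and config above are written to the node as a systemd drop-in (see the 10-kubeadm.conf scp further below). A hedged way to inspect the result once the node is running:
  minikube -p addons-002422 ssh "sudo systemctl cat kubelet"
  minikube -p addons-002422 ssh "sudo cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf"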
I1014 13:39:30.828039 8300 ssh_runner.go:195] Run: crio config
I1014 13:39:30.874149 8300 cni.go:84] Creating CNI manager for ""
I1014 13:39:30.874171 8300 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I1014 13:39:30.874181 8300 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I1014 13:39:30.874224 8300 kubeadm.go:189] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-002422 NodeName:addons-002422 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/crio/crio.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I1014 13:39:30.874361 8300 kubeadm.go:195] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/crio/crio.sock
  name: "addons-002422"
  kubeletExtraArgs:
    - name: "node-ip"
      value: "192.168.49.2"
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    - name: "enable-admission-plugins"
      value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    - name: "allocate-node-cidrs"
      value: "true"
    - name: "leader-elect"
      value: "false"
scheduler:
  extraArgs:
    - name: "leader-elect"
      value: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      - name: "proxy-refresh-interval"
        value: "70000"
kubernetesVersion: v1.31.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
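The rendered config above is copied to the node as /var/tmp/minikube/kubeadm.yaml.new (see the scp below) and later promoted to kubeadm.yaml. A sketch for inspecting or re-validating it by hand, assuming kubeadm v1.31.1 with the config validate subcommand:
  minikube -p addons-002422 ssh "sudo cat /var/tmp/minikube/kubeadm.yaml"
  minikube -p addons-002422 ssh "sudo /var/lib/minikube/binaries/v1.31.1/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml"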
I1014 13:39:30.874429 8300 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I1014 13:39:30.882973 8300 binaries.go:44] Found k8s binaries, skipping transfer
I1014 13:39:30.883071 8300 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I1014 13:39:30.892223 8300 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (363 bytes)
I1014 13:39:30.909506 8300 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I1014 13:39:30.926769 8300 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2287 bytes)
I1014 13:39:30.944321 8300 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I1014 13:39:30.947745 8300 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
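A quick sketch for confirming the two hosts entries written in this run (host.minikube.internal earlier and control-plane.minikube.internal here):
  minikube -p addons-002422 ssh "grep minikube.internal /etc/hosts"
  # expected: 192.168.49.1 host.minikube.internal and 192.168.49.2 control-plane.minikube.internal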
I1014 13:39:30.958400 8300 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1014 13:39:31.045686 8300 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1014 13:39:31.059522 8300 certs.go:68] Setting up /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422 for IP: 192.168.49.2
I1014 13:39:31.059593 8300 certs.go:194] generating shared ca certs ...
I1014 13:39:31.059622 8300 certs.go:226] acquiring lock for ca certs: {Name:mk06df15dc793252bd5ffa6daa3e5f2510797850 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:31.059783 8300 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19790-2228/.minikube/ca.key
I1014 13:39:31.279549 8300 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19790-2228/.minikube/ca.crt ...
I1014 13:39:31.279582 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/ca.crt: {Name:mkf2e09cdeaf406bd5dbfb6df51fda19d11b3a3a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:31.279812 8300 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19790-2228/.minikube/ca.key ...
I1014 13:39:31.279826 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/ca.key: {Name:mkbb0140f8b18956b3e337fe5d9dac3444c3cff6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:31.279917 8300 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.key
I1014 13:39:32.102633 8300 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.crt ...
I1014 13:39:32.102667 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.crt: {Name:mk87e80ab56810a443caa4380c01f4fa59f6347a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:32.102908 8300 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.key ...
I1014 13:39:32.102928 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.key: {Name:mk3f83de2f8ad31643196f738fbd59675505d818 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:32.103014 8300 certs.go:256] generating profile certs ...
I1014 13:39:32.103079 8300 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/client.key
I1014 13:39:32.103097 8300 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/client.crt with IP's: []
I1014 13:39:32.527349 8300 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/client.crt ...
I1014 13:39:32.527383 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/client.crt: {Name:mk7e896bcb1761dc92896d4828a4f921b266d096 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:32.527596 8300 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/client.key ...
I1014 13:39:32.527612 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/client.key: {Name:mk2471b3e7dfa66ccab07ee70fc530ef48ac5f1b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:32.527706 8300 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.key.17286ce0
I1014 13:39:32.527726 8300 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.crt.17286ce0 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I1014 13:39:33.097055 8300 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.crt.17286ce0 ...
I1014 13:39:33.097092 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.crt.17286ce0: {Name:mk0b396ed04de990231c7535e37286cbdddbeccb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:33.097278 8300 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.key.17286ce0 ...
I1014 13:39:33.097292 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.key.17286ce0: {Name:mkeaac9f624665f13ab091190d99656a19ad24ce Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:33.097375 8300 certs.go:381] copying /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.crt.17286ce0 -> /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.crt
I1014 13:39:33.097463 8300 certs.go:385] copying /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.key.17286ce0 -> /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.key
I1014 13:39:33.097517 8300 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.key
I1014 13:39:33.097536 8300 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.crt with IP's: []
I1014 13:39:33.368114 8300 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.crt ...
I1014 13:39:33.368146 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.crt: {Name:mk617006c2b50b41e3bf3976f48c6e2173294ddb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:33.368332 8300 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.key ...
I1014 13:39:33.368345 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.key: {Name:mk2d2071f6a997e883c7ef5cbfc1c62f114134be Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:33.368542 8300 certs.go:484] found cert: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca-key.pem (1679 bytes)
I1014 13:39:33.368583 8300 certs.go:484] found cert: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/ca.pem (1082 bytes)
I1014 13:39:33.368611 8300 certs.go:484] found cert: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/cert.pem (1123 bytes)
I1014 13:39:33.368640 8300 certs.go:484] found cert: /home/jenkins/minikube-integration/19790-2228/.minikube/certs/key.pem (1675 bytes)
I1014 13:39:33.369276 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I1014 13:39:33.396664 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I1014 13:39:33.421787 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I1014 13:39:33.446268 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I1014 13:39:33.470393 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I1014 13:39:33.498381 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I1014 13:39:33.522109 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I1014 13:39:33.545983 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/profiles/addons-002422/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I1014 13:39:33.569791 8300 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19790-2228/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I1014 13:39:33.594911 8300 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I1014 13:39:33.613470 8300 ssh_runner.go:195] Run: openssl version
I1014 13:39:33.618883 8300 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I1014 13:39:33.628463 8300 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I1014 13:39:33.631697 8300 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Oct 14 13:39 /usr/share/ca-certificates/minikubeCA.pem
I1014 13:39:33.631783 8300 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I1014 13:39:33.638573 8300 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
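The symlink name b5213941.0 above is the OpenSSL subject hash of the minikube CA (computed by the openssl x509 -hash call two lines earlier) with a .0 suffix. A sketch of deriving it on the host, assuming openssl is installed there:
  openssl x509 -hash -noout -in /home/jenkins/minikube-integration/19790-2228/.minikube/ca.crt
  # prints the hash used for the /etc/ssl/certs/<hash>.0 symlink (b5213941 in this run)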
I1014 13:39:33.647929 8300 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I1014 13:39:33.651145 8300 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I1014 13:39:33.651190 8300 kubeadm.go:392] StartCluster: {Name:addons-002422 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1728382586-19774@sha256:5d8c4f6d838467365e214e2194dd0153a763e3f78723b5f2a8e06ef7b47409ec Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-002422 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I1014 13:39:33.651267 8300 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
I1014 13:39:33.651321 8300 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I1014 13:39:33.690330 8300 cri.go:89] found id: ""
I1014 13:39:33.690396 8300 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I1014 13:39:33.699200 8300 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I1014 13:39:33.707904 8300 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I1014 13:39:33.707968 8300 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I1014 13:39:33.716528 8300 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I1014 13:39:33.716548 8300 kubeadm.go:157] found existing configuration files:
I1014 13:39:33.716598 8300 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I1014 13:39:33.725169 8300 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I1014 13:39:33.725233 8300 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I1014 13:39:33.733899 8300 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I1014 13:39:33.742056 8300 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I1014 13:39:33.742158 8300 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I1014 13:39:33.750333 8300 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I1014 13:39:33.758992 8300 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I1014 13:39:33.759078 8300 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I1014 13:39:33.767897 8300 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I1014 13:39:33.776528 8300 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I1014 13:39:33.776599 8300 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I1014 13:39:33.785190 8300 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I1014 13:39:33.823923 8300 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I1014 13:39:33.824199 8300 kubeadm.go:310] [preflight] Running pre-flight checks
I1014 13:39:33.844918 8300 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I1014 13:39:33.845063 8300 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1070-aws
I1014 13:39:33.845122 8300 kubeadm.go:310] OS: Linux
I1014 13:39:33.845223 8300 kubeadm.go:310] CGROUPS_CPU: enabled
I1014 13:39:33.845287 8300 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I1014 13:39:33.845338 8300 kubeadm.go:310] CGROUPS_CPUSET: enabled
I1014 13:39:33.845390 8300 kubeadm.go:310] CGROUPS_DEVICES: enabled
I1014 13:39:33.845445 8300 kubeadm.go:310] CGROUPS_FREEZER: enabled
I1014 13:39:33.845501 8300 kubeadm.go:310] CGROUPS_MEMORY: enabled
I1014 13:39:33.845550 8300 kubeadm.go:310] CGROUPS_PIDS: enabled
I1014 13:39:33.845602 8300 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I1014 13:39:33.845653 8300 kubeadm.go:310] CGROUPS_BLKIO: enabled
I1014 13:39:33.916605 8300 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I1014 13:39:33.916807 8300 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I1014 13:39:33.916916 8300 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I1014 13:39:33.925110 8300 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I1014 13:39:33.930119 8300 out.go:235] - Generating certificates and keys ...
I1014 13:39:33.930214 8300 kubeadm.go:310] [certs] Using existing ca certificate authority
I1014 13:39:33.930332 8300 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I1014 13:39:34.198508 8300 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I1014 13:39:34.622750 8300 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I1014 13:39:34.805332 8300 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I1014 13:39:35.248566 8300 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I1014 13:39:35.947522 8300 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I1014 13:39:35.947821 8300 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-002422 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1014 13:39:36.290501 8300 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I1014 13:39:36.295957 8300 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-002422 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I1014 13:39:36.540393 8300 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I1014 13:39:36.985910 8300 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I1014 13:39:37.131122 8300 kubeadm.go:310] [certs] Generating "sa" key and public key
I1014 13:39:37.131491 8300 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I1014 13:39:37.561848 8300 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I1014 13:39:38.018910 8300 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I1014 13:39:38.921446 8300 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I1014 13:39:39.097017 8300 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I1014 13:39:39.398377 8300 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I1014 13:39:39.399030 8300 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I1014 13:39:39.401991 8300 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I1014 13:39:39.403670 8300 out.go:235] - Booting up control plane ...
I1014 13:39:39.403765 8300 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I1014 13:39:39.403841 8300 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I1014 13:39:39.404568 8300 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I1014 13:39:39.414705 8300 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I1014 13:39:39.420512 8300 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1014 13:39:39.420912 8300 kubeadm.go:310] [kubelet-start] Starting the kubelet
I1014 13:39:39.515212 8300 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I1014 13:39:39.515331 8300 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I1014 13:39:40.517315 8300 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.001877986s
I1014 13:39:40.517407 8300 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I1014 13:39:46.520416 8300 kubeadm.go:310] [api-check] The API server is healthy after 6.001293176s
I1014 13:39:46.537487 8300 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1014 13:39:46.551261 8300 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I1014 13:39:46.577727 8300 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I1014 13:39:46.577920 8300 kubeadm.go:310] [mark-control-plane] Marking the node addons-002422 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I1014 13:39:46.587508 8300 kubeadm.go:310] [bootstrap-token] Using token: p0ldfg.l4f8resh3yr04gj6
I1014 13:39:46.588848 8300 out.go:235] - Configuring RBAC rules ...
I1014 13:39:46.588969 8300 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I1014 13:39:46.594842 8300 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I1014 13:39:46.605038 8300 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I1014 13:39:46.610706 8300 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I1014 13:39:46.615031 8300 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I1014 13:39:46.619515 8300 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I1014 13:39:46.925078 8300 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I1014 13:39:47.354345 8300 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I1014 13:39:47.924387 8300 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I1014 13:39:47.925552 8300 kubeadm.go:310]
I1014 13:39:47.925643 8300 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I1014 13:39:47.925659 8300 kubeadm.go:310]
I1014 13:39:47.925756 8300 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I1014 13:39:47.925765 8300 kubeadm.go:310]
I1014 13:39:47.925801 8300 kubeadm.go:310] mkdir -p $HOME/.kube
I1014 13:39:47.925880 8300 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I1014 13:39:47.925939 8300 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I1014 13:39:47.925943 8300 kubeadm.go:310]
I1014 13:39:47.926001 8300 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I1014 13:39:47.926005 8300 kubeadm.go:310]
I1014 13:39:47.926057 8300 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I1014 13:39:47.926061 8300 kubeadm.go:310]
I1014 13:39:47.926116 8300 kubeadm.go:310] You should now deploy a pod network to the cluster.
I1014 13:39:47.926206 8300 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I1014 13:39:47.926279 8300 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I1014 13:39:47.926283 8300 kubeadm.go:310]
I1014 13:39:47.926378 8300 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I1014 13:39:47.926460 8300 kubeadm.go:310] and service account keys on each node and then running the following as root:
I1014 13:39:47.926464 8300 kubeadm.go:310]
I1014 13:39:47.926553 8300 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token p0ldfg.l4f8resh3yr04gj6 \
I1014 13:39:47.926662 8300 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:7f4316051a451070b62e5ea00267a1d9ae2a3434782771c12eaedf3124887c0a \
I1014 13:39:47.926684 8300 kubeadm.go:310] --control-plane
I1014 13:39:47.926688 8300 kubeadm.go:310]
I1014 13:39:47.926779 8300 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I1014 13:39:47.926783 8300 kubeadm.go:310]
I1014 13:39:47.926870 8300 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token p0ldfg.l4f8resh3yr04gj6 \
I1014 13:39:47.926979 8300 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:7f4316051a451070b62e5ea00267a1d9ae2a3434782771c12eaedf3124887c0a
I1014 13:39:47.929366 8300 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1070-aws\n", err: exit status 1
I1014 13:39:47.929552 8300 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
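The second warning is typically benign here, since minikube starts the kubelet itself (see the systemctl start kubelet calls elsewhere in this log); if desired, the suggested fix can be applied by hand. A sketch:
  minikube -p addons-002422 ssh "sudo systemctl enable kubelet.service"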
I1014 13:39:47.929594 8300 cni.go:84] Creating CNI manager for ""
I1014 13:39:47.929630 8300 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I1014 13:39:47.931645 8300 out.go:177] * Configuring CNI (Container Networking Interface) ...
I1014 13:39:47.932940 8300 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I1014 13:39:47.936529 8300 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.31.1/kubectl ...
I1014 13:39:47.936549 8300 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2601 bytes)
I1014 13:39:47.953687 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I1014 13:39:48.223733 8300 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I1014 13:39:48.223882 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:48.223931 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-002422 minikube.k8s.io/updated_at=2024_10_14T13_39_48_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=f9f6c2ada6d933af9900f45012fe0fe625736c5b minikube.k8s.io/name=addons-002422 minikube.k8s.io/primary=true
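A sketch for confirming the RBAC binding and node labels applied by the two kubectl invocations above:
  kubectl --context addons-002422 get clusterrolebinding minikube-rbac
  kubectl --context addons-002422 get node addons-002422 --show-labels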
I1014 13:39:48.381596 8300 ops.go:34] apiserver oom_adj: -16
I1014 13:39:48.381696 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:48.882570 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:49.381802 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:49.882430 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:50.381796 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:50.882381 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:51.382632 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:51.882261 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:52.381834 8300 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I1014 13:39:52.506604 8300 kubeadm.go:1113] duration metric: took 4.282782773s to wait for elevateKubeSystemPrivileges
I1014 13:39:52.506630 8300 kubeadm.go:394] duration metric: took 18.855443881s to StartCluster
I1014 13:39:52.506645 8300 settings.go:142] acquiring lock: {Name:mk543bfe3e4ad3a74f943b74c0d30c5d6649b3b6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:52.506755 8300 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19790-2228/kubeconfig
I1014 13:39:52.507116 8300 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19790-2228/kubeconfig: {Name:mkdfcbe4a3a3bd606687ca36b460845a3c3f03d7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I1014 13:39:52.507287 8300 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:crio ControlPlane:true Worker:true}
I1014 13:39:52.507446 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I1014 13:39:52.507675 8300 config.go:182] Loaded profile config "addons-002422": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.1
I1014 13:39:52.507742 8300 addons.go:507] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:true auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
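Every entry set to true in the map above is an addon minikube will enable for this profile. A sketch of the equivalent manual commands (assuming the minikube binary is on PATH; addon names as listed above):
  minikube -p addons-002422 addons list
  minikube -p addons-002422 addons enable ingress
  minikube -p addons-002422 addons enable metrics-server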
I1014 13:39:52.507819 8300 addons.go:69] Setting yakd=true in profile "addons-002422"
I1014 13:39:52.507833 8300 addons.go:234] Setting addon yakd=true in "addons-002422"
I1014 13:39:52.507856 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.508310 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.509142 8300 addons.go:69] Setting metrics-server=true in profile "addons-002422"
I1014 13:39:52.509163 8300 addons.go:234] Setting addon metrics-server=true in "addons-002422"
I1014 13:39:52.509188 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.509478 8300 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-002422"
I1014 13:39:52.509491 8300 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-002422"
I1014 13:39:52.509510 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.510208 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510573 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.511231 8300 out.go:177] * Verifying Kubernetes components...
I1014 13:39:52.510580 8300 addons.go:69] Setting registry=true in profile "addons-002422"
I1014 13:39:52.511505 8300 addons.go:234] Setting addon registry=true in "addons-002422"
I1014 13:39:52.511538 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.511949 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510587 8300 addons.go:69] Setting storage-provisioner=true in profile "addons-002422"
I1014 13:39:52.520596 8300 addons.go:234] Setting addon storage-provisioner=true in "addons-002422"
I1014 13:39:52.520640 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.521115 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510591 8300 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-002422"
I1014 13:39:52.532981 8300 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-002422"
I1014 13:39:52.533369 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510595 8300 addons.go:69] Setting volcano=true in profile "addons-002422"
I1014 13:39:52.550220 8300 addons.go:234] Setting addon volcano=true in "addons-002422"
I1014 13:39:52.550275 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.510598 8300 addons.go:69] Setting volumesnapshots=true in profile "addons-002422"
I1014 13:39:52.552126 8300 addons.go:234] Setting addon volumesnapshots=true in "addons-002422"
I1014 13:39:52.552174 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.552826 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.553954 8300 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I1014 13:39:52.565059 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510632 8300 addons.go:69] Setting default-storageclass=true in profile "addons-002422"
I1014 13:39:52.572346 8300 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-002422"
I1014 13:39:52.572712 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510636 8300 addons.go:69] Setting amd-gpu-device-plugin=true in profile "addons-002422"
I1014 13:39:52.590505 8300 addons.go:234] Setting addon amd-gpu-device-plugin=true in "addons-002422"
I1014 13:39:52.510639 8300 addons.go:69] Setting cloud-spanner=true in profile "addons-002422"
I1014 13:39:52.590563 8300 addons.go:234] Setting addon cloud-spanner=true in "addons-002422"
I1014 13:39:52.590589 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.510643 8300 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-002422"
I1014 13:39:52.590669 8300 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-002422"
I1014 13:39:52.590688 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.591142 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.593729 8300 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I1014 13:39:52.596547 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.597176 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510647 8300 addons.go:69] Setting ingress=true in profile "addons-002422"
I1014 13:39:52.606101 8300 addons.go:234] Setting addon ingress=true in "addons-002422"
I1014 13:39:52.606149 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.510650 8300 addons.go:69] Setting gcp-auth=true in profile "addons-002422"
I1014 13:39:52.606423 8300 mustload.go:65] Loading cluster: addons-002422
I1014 13:39:52.606578 8300 config.go:182] Loaded profile config "addons-002422": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.1
I1014 13:39:52.606878 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.612375 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510654 8300 addons.go:69] Setting ingress-dns=true in profile "addons-002422"
I1014 13:39:52.616829 8300 addons.go:234] Setting addon ingress-dns=true in "addons-002422"
I1014 13:39:52.618350 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.618913 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.510659 8300 addons.go:69] Setting inspektor-gadget=true in profile "addons-002422"
I1014 13:39:52.672064 8300 addons.go:234] Setting addon inspektor-gadget=true in "addons-002422"
I1014 13:39:52.672106 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.672586 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.674339 8300 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I1014 13:39:52.674367 8300 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I1014 13:39:52.674431 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.685876 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.726733 8300 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I1014 13:39:52.729479 8300 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I1014 13:39:52.729504 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I1014 13:39:52.729571 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.738262 8300 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I1014 13:39:52.740620 8300 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I1014 13:39:52.740706 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I1014 13:39:52.740816 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.759125 8300 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I1014 13:39:52.761903 8300 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I1014 13:39:52.761978 8300 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I1014 13:39:52.762079 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.775806 8300 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.8
I1014 13:39:52.777050 8300 out.go:177] - Using image docker.io/registry:2.8.3
I1014 13:39:52.778795 8300 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I1014 13:39:52.778815 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I1014 13:39:52.778876 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.810423 8300 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
W1014 13:39:52.810650 8300 out.go:270] ! Enabling 'volcano' returned an error: running callbacks: [volcano addon does not support crio]
I1014 13:39:52.843322 8300 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I1014 13:39:52.843343 8300 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I1014 13:39:52.843404 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.875932 8300 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-002422"
I1014 13:39:52.875978 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.876396 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.900015 8300 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I1014 13:39:52.903825 8300 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I1014 13:39:52.905216 8300 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I1014 13:39:52.905782 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I1014 13:39:52.905912 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
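The SSH endpoint above (127.0.0.1:32768, user docker, per-profile key) is how minikube reaches the node container; the published port comes from the docker container inspect ... 22/tcp calls earlier. A sketch of opening the same connection by hand:
  ssh -i /home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa -p 32768 docker@127.0.0.1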
I1014 13:39:52.907430 8300 addons.go:234] Setting addon default-storageclass=true in "addons-002422"
I1014 13:39:52.907470 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.907882 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:39:52.909830 8300 out.go:177] - Using image docker.io/rocm/k8s-device-plugin:1.25.2.8
I1014 13:39:52.925147 8300 addons.go:431] installing /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
I1014 13:39:52.925170 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/amd-gpu-device-plugin.yaml (1868 bytes)
I1014 13:39:52.925233 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.960835 8300 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.24
I1014 13:39:52.962599 8300 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
I1014 13:39:52.962627 8300 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I1014 13:39:52.962620 8300 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I1014 13:39:52.963349 8300 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I1014 13:39:52.963366 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I1014 13:39:52.963429 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.963206 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:39:52.970289 8300 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I1014 13:39:52.970310 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I1014 13:39:52.970378 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:52.984524 8300 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.33.0
I1014 13:39:52.986563 8300 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I1014 13:39:52.986585 8300 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5248 bytes)
I1014 13:39:52.986665 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:53.000851 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.002898 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.004669 8300 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
I1014 13:39:53.005171 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.007617 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.008851 8300 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I1014 13:39:53.011846 8300 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I1014 13:39:53.012163 8300 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.3
I1014 13:39:53.014531 8300 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I1014 13:39:53.014551 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I1014 13:39:53.014610 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:53.017400 8300 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I1014 13:39:53.021178 8300 ssh_runner.go:195] Run: sudo systemctl start kubelet
I1014 13:39:53.032819 8300 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I1014 13:39:53.048821 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I1014 13:39:53.050784 8300 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I1014 13:39:53.051154 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:53.075156 8300 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I1014 13:39:53.075179 8300 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I1014 13:39:53.075238 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:53.118592 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.125109 8300 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I1014 13:39:53.127021 8300 out.go:177] - Using image docker.io/busybox:stable
I1014 13:39:53.128267 8300 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I1014 13:39:53.128285 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I1014 13:39:53.128347 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:39:53.149764 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.157878 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.173153 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.173565 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.210375 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.227983 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:39:53.228760 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
W1014 13:39:53.232143 8300 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I1014 13:39:53.232171 8300 retry.go:31] will retry after 201.075001ms: ssh: handshake failed: EOF
I1014 13:39:53.251959 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
W1014 13:39:53.252826 8300 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I1014 13:39:53.252854 8300 retry.go:31] will retry after 290.693438ms: ssh: handshake failed: EOF
I1014 13:39:53.321325 8300 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I1014 13:39:53.321400 8300 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I1014 13:39:53.465529 8300 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I1014 13:39:53.465600 8300 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I1014 13:39:53.521197 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I1014 13:39:53.559324 8300 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I1014 13:39:53.559396 8300 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I1014 13:39:53.588000 8300 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I1014 13:39:53.588071 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I1014 13:39:53.596777 8300 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I1014 13:39:53.596867 8300 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I1014 13:39:53.611889 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I1014 13:39:53.691506 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I1014 13:39:53.701027 8300 addons.go:431] installing /etc/kubernetes/addons/ig-deployment.yaml
I1014 13:39:53.701101 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-deployment.yaml (14451 bytes)
I1014 13:39:53.706980 8300 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I1014 13:39:53.707058 8300 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I1014 13:39:53.721608 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I1014 13:39:53.726010 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml
I1014 13:39:53.727214 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I1014 13:39:53.748244 8300 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I1014 13:39:53.748324 8300 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I1014 13:39:53.766870 8300 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I1014 13:39:53.766935 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I1014 13:39:53.798159 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I1014 13:39:53.807972 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I1014 13:39:53.829894 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I1014 13:39:53.829968 8300 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I1014 13:39:53.831870 8300 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I1014 13:39:53.831936 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I1014 13:39:53.904458 8300 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I1014 13:39:53.904529 8300 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I1014 13:39:53.915013 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml
I1014 13:39:53.918332 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I1014 13:39:53.918352 8300 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I1014 13:39:53.967109 8300 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I1014 13:39:53.967183 8300 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I1014 13:39:53.995912 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I1014 13:39:53.999172 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I1014 13:39:54.008313 8300 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I1014 13:39:54.008386 8300 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I1014 13:39:54.050016 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I1014 13:39:54.050117 8300 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I1014 13:39:54.150952 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I1014 13:39:54.158895 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I1014 13:39:54.158973 8300 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I1014 13:39:54.216839 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I1014 13:39:54.216910 8300 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I1014 13:39:54.298038 8300 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1014 13:39:54.298106 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I1014 13:39:54.425246 8300 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I1014 13:39:54.425318 8300 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I1014 13:39:54.518844 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1014 13:39:54.627793 8300 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I1014 13:39:54.627858 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I1014 13:39:54.710668 8300 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I1014 13:39:54.710741 8300 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I1014 13:39:54.878400 8300 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I1014 13:39:54.878468 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I1014 13:39:55.067607 8300 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I1014 13:39:55.067709 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I1014 13:39:55.102129 8300 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.196317747s)
I1014 13:39:55.102343 8300 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I1014 13:39:55.102246 8300 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (2.081032362s)
I1014 13:39:55.103403 8300 node_ready.go:35] waiting up to 6m0s for node "addons-002422" to be "Ready" ...
I1014 13:39:55.260571 8300 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I1014 13:39:55.260641 8300 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I1014 13:39:55.430413 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I1014 13:39:56.191149 8300 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-002422" context rescaled to 1 replicas
I1014 13:39:57.531843 8300 node_ready.go:53] node "addons-002422" has status "Ready":"False"
I1014 13:39:58.190385 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (4.578417915s)
I1014 13:39:58.190449 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (4.498880501s)
I1014 13:39:58.190488 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (4.468813955s)
I1014 13:39:58.190506 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/amd-gpu-device-plugin.yaml: (4.46442961s)
I1014 13:39:58.190521 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (4.463252488s)
I1014 13:39:58.190651 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (4.669384715s)
I1014 13:39:58.489703 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (4.691446827s)
I1014 13:39:59.453746 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (5.645701166s)
I1014 13:39:59.453825 8300 addons.go:475] Verifying addon ingress=true in "addons-002422"
I1014 13:39:59.454100 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-deployment.yaml: (5.538956517s)
I1014 13:39:59.454193 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (5.458209619s)
I1014 13:39:59.454257 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (5.455024986s)
I1014 13:39:59.454450 8300 addons.go:475] Verifying addon registry=true in "addons-002422"
I1014 13:39:59.454313 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (5.303303753s)
I1014 13:39:59.454779 8300 addons.go:475] Verifying addon metrics-server=true in "addons-002422"
I1014 13:39:59.456620 8300 out.go:177] * Verifying registry addon...
I1014 13:39:59.456787 8300 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-002422 service yakd-dashboard -n yakd-dashboard
I1014 13:39:59.456791 8300 out.go:177] * Verifying ingress addon...
I1014 13:39:59.460826 8300 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I1014 13:39:59.461823 8300 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I1014 13:39:59.470853 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (4.95192719s)
W1014 13:39:59.470897 8300 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I1014 13:39:59.470916 8300 retry.go:31] will retry after 363.594794ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I1014 13:39:59.475044 8300 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=registry
I1014 13:39:59.475137 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:39:59.494186 8300 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I1014 13:39:59.494260 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:39:59.618776 8300 node_ready.go:53] node "addons-002422" has status "Ready":"False"
I1014 13:39:59.674045 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (4.243504428s)
I1014 13:39:59.674122 8300 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-002422"
I1014 13:39:59.677091 8300 out.go:177] * Verifying csi-hostpath-driver addon...
I1014 13:39:59.680601 8300 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I1014 13:39:59.688079 8300 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I1014 13:39:59.688106 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:39:59.834651 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I1014 13:39:59.966110 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:39:59.967159 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:00.182897 8300 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I1014 13:40:00.183061 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:40:00.208777 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:40:00.215133 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:00.327148 8300 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I1014 13:40:00.348098 8300 addons.go:234] Setting addon gcp-auth=true in "addons-002422"
I1014 13:40:00.348157 8300 host.go:66] Checking if "addons-002422" exists ...
I1014 13:40:00.348646 8300 cli_runner.go:164] Run: docker container inspect addons-002422 --format={{.State.Status}}
I1014 13:40:00.371447 8300 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I1014 13:40:00.371503 8300 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-002422
I1014 13:40:00.390256 8300 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19790-2228/.minikube/machines/addons-002422/id_rsa Username:docker}
I1014 13:40:00.465620 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:00.466509 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:00.685248 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:00.963894 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:00.965769 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:01.184031 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:01.465535 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:01.465945 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:01.685003 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:01.964337 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:01.965880 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:02.107412 8300 node_ready.go:53] node "addons-002422" has status "Ready":"False"
I1014 13:40:02.184392 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:02.467313 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:02.468545 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:02.630593 8300 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.795900604s)
I1014 13:40:02.630657 8300 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (2.259182574s)
I1014 13:40:02.633830 8300 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.4
I1014 13:40:02.636777 8300 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.3
I1014 13:40:02.639988 8300 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I1014 13:40:02.640015 8300 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I1014 13:40:02.658660 8300 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I1014 13:40:02.658723 8300 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I1014 13:40:02.677247 8300 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I1014 13:40:02.677270 8300 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I1014 13:40:02.686328 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:02.697294 8300 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I1014 13:40:02.965552 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:02.966886 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:03.206469 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:03.211991 8300 addons.go:475] Verifying addon gcp-auth=true in "addons-002422"
I1014 13:40:03.215047 8300 out.go:177] * Verifying gcp-auth addon...
I1014 13:40:03.218536 8300 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I1014 13:40:03.230067 8300 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I1014 13:40:03.230140 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:03.464794 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:03.465865 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:03.684409 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:03.722413 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:03.964910 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:03.965754 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:04.184001 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:04.222224 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:04.464132 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:04.465835 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:04.607095 8300 node_ready.go:53] node "addons-002422" has status "Ready":"False"
I1014 13:40:04.684540 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:04.722062 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:04.965026 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:04.966122 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:05.184931 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:05.222359 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:05.464871 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:05.465614 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:05.685221 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:05.722755 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:05.965721 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:05.966755 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:06.184612 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:06.226391 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:06.466295 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:06.466835 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:06.684279 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:06.722387 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:06.964517 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:06.966650 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:07.107318 8300 node_ready.go:53] node "addons-002422" has status "Ready":"False"
I1014 13:40:07.184262 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:07.221898 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:07.464023 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:07.465715 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:07.684126 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:07.722266 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:07.964971 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:07.966311 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:08.185249 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:08.221712 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:08.465735 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:08.466519 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:08.643449 8300 node_ready.go:49] node "addons-002422" has status "Ready":"True"
I1014 13:40:08.643475 8300 node_ready.go:38] duration metric: took 13.540005457s for node "addons-002422" to be "Ready" ...
I1014 13:40:08.643486 8300 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1014 13:40:08.703756 8300 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-bsnhb" in "kube-system" namespace to be "Ready" ...
I1014 13:40:08.713250 8300 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I1014 13:40:08.713277 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:08.737645 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:09.027476 8300 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I1014 13:40:09.027504 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:09.028465 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:09.192685 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:09.235778 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:09.470625 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:09.471644 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:09.686336 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:09.724017 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:09.968520 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:09.969677 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:10.185754 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:10.284757 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:10.464657 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:10.468881 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:10.710741 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:10.770593 8300 pod_ready.go:103] pod "coredns-7c65d6cfc9-bsnhb" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:10.794079 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:10.965702 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:10.967342 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:11.185841 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:11.222030 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:11.467066 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:11.469206 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:11.685166 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:11.711397 8300 pod_ready.go:93] pod "coredns-7c65d6cfc9-bsnhb" in "kube-system" namespace has status "Ready":"True"
I1014 13:40:11.711422 8300 pod_ready.go:82] duration metric: took 3.007626472s for pod "coredns-7c65d6cfc9-bsnhb" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.711456 8300 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.716836 8300 pod_ready.go:93] pod "etcd-addons-002422" in "kube-system" namespace has status "Ready":"True"
I1014 13:40:11.716863 8300 pod_ready.go:82] duration metric: took 5.39615ms for pod "etcd-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.716879 8300 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.722185 8300 pod_ready.go:93] pod "kube-apiserver-addons-002422" in "kube-system" namespace has status "Ready":"True"
I1014 13:40:11.722216 8300 pod_ready.go:82] duration metric: took 5.329212ms for pod "kube-apiserver-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.722228 8300 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.722840 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:11.726976 8300 pod_ready.go:93] pod "kube-controller-manager-addons-002422" in "kube-system" namespace has status "Ready":"True"
I1014 13:40:11.726999 8300 pod_ready.go:82] duration metric: took 4.763003ms for pod "kube-controller-manager-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.727014 8300 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-l8cm8" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.732189 8300 pod_ready.go:93] pod "kube-proxy-l8cm8" in "kube-system" namespace has status "Ready":"True"
I1014 13:40:11.732216 8300 pod_ready.go:82] duration metric: took 5.194263ms for pod "kube-proxy-l8cm8" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.732230 8300 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:11.965558 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:11.965832 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:12.107679 8300 pod_ready.go:93] pod "kube-scheduler-addons-002422" in "kube-system" namespace has status "Ready":"True"
I1014 13:40:12.107702 8300 pod_ready.go:82] duration metric: took 375.464914ms for pod "kube-scheduler-addons-002422" in "kube-system" namespace to be "Ready" ...
I1014 13:40:12.107715 8300 pod_ready.go:79] waiting up to 6m0s for pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace to be "Ready" ...
I1014 13:40:12.187450 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:12.230307 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:12.467870 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:12.469141 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:12.686181 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:12.726472 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:12.968632 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:12.971453 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:13.186734 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:13.288379 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:13.468833 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:13.469572 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:13.691085 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:13.722796 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:13.967320 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:13.968732 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:14.123790 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:14.188791 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:14.223008 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:14.473340 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:14.476242 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:14.699309 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:14.728731 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:14.971976 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:14.972724 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:15.185896 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:15.222496 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:15.467542 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:15.468799 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:15.686904 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:15.786475 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:15.966125 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:15.967040 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:16.185797 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:16.221978 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:16.466570 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:16.467388 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:16.614049 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:16.685524 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:16.722681 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:16.964684 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:16.967332 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:17.186668 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:17.223243 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:17.466297 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:17.466753 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:17.686530 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:17.722651 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:17.970506 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:17.972546 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:18.186497 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:18.222309 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:18.466524 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:18.467462 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:18.614247 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:18.686853 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:18.721947 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:18.965775 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:18.966738 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:19.185186 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:19.222274 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:19.464580 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:19.466106 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:19.685875 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:19.721911 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:19.964671 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:19.966618 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:20.189311 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:20.222657 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:20.472199 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:20.473727 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:20.618297 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:20.686013 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:20.725067 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:20.965365 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:20.967580 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:21.186187 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:21.222418 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:21.466257 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:21.468884 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:21.695334 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:21.722420 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:21.967200 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:21.968432 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:22.186507 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:22.223515 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:22.467776 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:22.468823 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:22.686524 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:22.722555 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:22.966992 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:22.968380 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:23.127726 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:23.186075 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:23.221928 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:23.472480 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:23.475893 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:23.685670 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:23.722116 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:23.964675 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:23.966307 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:24.185949 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:24.221763 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:24.466929 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:24.467840 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:24.686500 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:24.723357 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:24.964654 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:24.966885 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:25.186770 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:25.224264 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:25.468835 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:25.472989 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:25.615515 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:25.685771 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:25.785531 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:25.966812 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:25.968108 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:26.187678 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:26.221680 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:26.469421 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:26.471831 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:26.686271 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:26.722724 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:26.967053 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:26.970512 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:27.185477 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:27.222867 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:27.466250 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:27.468931 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:27.688567 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:27.722803 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:27.967279 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:27.968910 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:28.120174 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:28.185963 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:28.222451 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:28.467731 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:28.469892 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:28.694484 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:28.723733 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:28.968844 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:28.970243 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:29.188565 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:29.222632 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:29.465237 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:29.470081 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:29.686314 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:29.721820 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:29.967077 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:29.968097 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:30.123378 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:30.186144 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:30.222392 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:30.467612 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:30.468606 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:30.685939 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:30.722292 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:30.966292 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:30.966498 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:31.187119 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:31.228397 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:31.469474 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:31.470647 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:31.687793 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:31.722951 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:31.966353 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:31.968952 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:32.191012 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:32.222245 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:32.493783 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:32.497471 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:32.616729 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:32.685402 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:32.723199 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:33.015091 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:33.016225 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:33.186185 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:33.222121 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:33.464925 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:33.466305 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:33.685313 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:33.721937 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:33.965674 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:33.966000 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:34.188134 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:34.222521 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:34.465926 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:34.466210 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:34.685495 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:34.721879 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:34.967054 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:34.967585 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:35.123958 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:35.186850 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:35.222328 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:35.474600 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:35.476259 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:35.688267 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:35.722698 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:35.966461 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:35.968904 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:36.189405 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:36.227105 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:36.467374 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:36.469201 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:36.687641 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:36.723276 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:36.966516 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:36.966899 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:37.186027 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:37.221954 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:37.466693 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I1014 13:40:37.467661 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:37.613599 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:37.685542 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:37.722282 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:37.964239 8300 kapi.go:107] duration metric: took 38.50341222s to wait for kubernetes.io/minikube-addons=registry ...
I1014 13:40:37.966629 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:38.185126 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:38.221942 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:38.466888 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:38.686438 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:38.722778 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:38.966517 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:39.187282 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:39.223207 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:39.466661 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:39.614336 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:39.691196 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:39.787651 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:39.966764 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:40.186084 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:40.222886 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:40.468879 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:40.687211 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:40.722327 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:40.968203 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:41.186447 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:41.222002 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:41.467195 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:41.614734 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:41.686099 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:41.722065 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:41.966707 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:42.185957 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:42.222263 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:42.466761 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:42.685295 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:42.722139 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:42.965961 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:43.185566 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:43.222396 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:43.466206 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:43.685170 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:43.722217 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:43.965841 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:44.119018 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:44.185599 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:44.222258 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:44.472427 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:44.685456 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:44.722820 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:44.967061 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:45.186268 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:45.222582 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:45.467133 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:45.686006 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:45.722745 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:45.966725 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:46.122727 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:46.185705 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:46.222478 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:46.470580 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:46.686007 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:46.723722 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:46.968777 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:47.189929 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:47.230753 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:47.468104 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:47.686508 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:47.735612 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:47.967384 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:48.186115 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:48.222574 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:48.467660 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:48.613831 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:48.686770 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:48.723180 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:48.967036 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:49.186235 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:49.222254 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:49.466998 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:49.688335 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:49.785244 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:49.965851 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:50.185449 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:50.222201 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:50.466967 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:50.615703 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:50.688793 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:50.723351 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:50.967653 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:51.187020 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:51.222617 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:51.466384 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:51.685756 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:51.722480 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:51.966614 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:52.185552 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:52.221973 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:52.467049 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:52.685690 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:52.721869 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:52.966897 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:53.117849 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:53.185524 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:53.222512 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:53.466694 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:53.685231 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:53.726031 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:53.966413 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:54.186863 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:54.222376 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:54.466654 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:54.686527 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:54.722195 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:54.967206 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:55.120495 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:55.186636 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:55.222945 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:55.466704 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:55.686587 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:55.723177 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:55.967943 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:56.185945 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:56.230424 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:56.467580 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:56.686756 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:56.729151 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:56.967597 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:57.138863 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:57.187768 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:57.222620 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:57.467322 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:57.686340 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:57.722924 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:57.973862 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:58.187396 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:58.223433 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:58.471576 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:58.696283 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:58.722845 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:58.969617 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:59.144349 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:40:59.187326 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:59.224062 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:59.468904 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:40:59.685865 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:40:59.723300 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:40:59.967201 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:00.187328 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:00.223296 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:00.469948 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:00.689322 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:00.787865 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:00.967220 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:01.193121 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:01.223356 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:01.466076 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:01.616801 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:01.685600 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:01.721282 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:01.968555 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:02.185627 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:02.221744 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:02.466461 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:02.685728 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:02.721776 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:02.966641 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:03.186738 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:03.222869 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:03.469224 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:03.685717 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:03.721948 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:03.967116 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:04.123040 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:04.186633 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:04.224324 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:04.466978 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:04.686272 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:04.722968 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:04.966310 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:05.185602 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:05.221985 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:05.466644 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:05.688078 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:05.722513 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:05.968444 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:06.193617 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:06.221743 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:06.466302 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:06.619047 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:06.687322 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:06.723683 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:06.966537 8300 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I1014 13:41:07.188570 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:07.222504 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:07.468497 8300 kapi.go:107] duration metric: took 1m8.006680509s to wait for app.kubernetes.io/name=ingress-nginx ...
I1014 13:41:07.685477 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:07.721750 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:08.186738 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:08.222996 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:08.619965 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:08.686078 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:08.722502 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:09.186302 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:09.222663 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:09.685631 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:09.721891 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:10.186594 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:10.222211 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:10.686002 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:10.722457 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:11.130059 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:11.186102 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:11.222916 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:11.684891 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:11.722141 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:12.193144 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:12.222686 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:12.689014 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:12.788604 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:13.186121 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:13.222010 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:13.613504 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:13.687201 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:13.722294 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:14.186028 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:14.223174 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:14.693278 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:14.722092 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:15.190467 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:15.224301 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:15.614338 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:15.685666 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:15.722373 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:16.185958 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:16.222136 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:16.685847 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:16.722238 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:17.185998 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:17.221972 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:17.685169 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:17.721825 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:18.123627 8300 pod_ready.go:103] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"False"
I1014 13:41:18.187717 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:18.222369 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:18.685613 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:18.727245 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:19.122743 8300 pod_ready.go:93] pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace has status "Ready":"True"
I1014 13:41:19.122765 8300 pod_ready.go:82] duration metric: took 1m7.015042829s for pod "metrics-server-84c5f94fbc-p68nc" in "kube-system" namespace to be "Ready" ...
I1014 13:41:19.122779 8300 pod_ready.go:79] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-tnngr" in "kube-system" namespace to be "Ready" ...
I1014 13:41:19.132407 8300 pod_ready.go:93] pod "nvidia-device-plugin-daemonset-tnngr" in "kube-system" namespace has status "Ready":"True"
I1014 13:41:19.132479 8300 pod_ready.go:82] duration metric: took 9.69201ms for pod "nvidia-device-plugin-daemonset-tnngr" in "kube-system" namespace to be "Ready" ...
I1014 13:41:19.132516 8300 pod_ready.go:39] duration metric: took 1m10.489017042s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I1014 13:41:19.132561 8300 api_server.go:52] waiting for apiserver process to appear ...
I1014 13:41:19.132608 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1014 13:41:19.132693 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1014 13:41:19.190302 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:19.211811 8300 cri.go:89] found id: "8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74"
I1014 13:41:19.211879 8300 cri.go:89] found id: ""
I1014 13:41:19.211901 8300 logs.go:282] 1 containers: [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74]
I1014 13:41:19.211982 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:19.220526 8300 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1014 13:41:19.220641 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1014 13:41:19.241773 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I1014 13:41:19.276339 8300 cri.go:89] found id: "1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896"
I1014 13:41:19.276410 8300 cri.go:89] found id: ""
I1014 13:41:19.276433 8300 logs.go:282] 1 containers: [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896]
I1014 13:41:19.276519 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:19.280479 8300 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1014 13:41:19.280599 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1014 13:41:19.351441 8300 cri.go:89] found id: "ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f"
I1014 13:41:19.351484 8300 cri.go:89] found id: ""
I1014 13:41:19.351493 8300 logs.go:282] 1 containers: [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f]
I1014 13:41:19.351555 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:19.355570 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1014 13:41:19.355656 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1014 13:41:19.413270 8300 cri.go:89] found id: "62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8"
I1014 13:41:19.413304 8300 cri.go:89] found id: ""
I1014 13:41:19.413313 8300 logs.go:282] 1 containers: [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8]
I1014 13:41:19.413381 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:19.420849 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1014 13:41:19.420934 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1014 13:41:19.483334 8300 cri.go:89] found id: "09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255"
I1014 13:41:19.483358 8300 cri.go:89] found id: ""
I1014 13:41:19.483382 8300 logs.go:282] 1 containers: [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255]
I1014 13:41:19.483446 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:19.487618 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1014 13:41:19.487717 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1014 13:41:19.551083 8300 cri.go:89] found id: "3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8"
I1014 13:41:19.551142 8300 cri.go:89] found id: ""
I1014 13:41:19.551158 8300 logs.go:282] 1 containers: [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8]
I1014 13:41:19.551215 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:19.554787 8300 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1014 13:41:19.554860 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1014 13:41:19.598310 8300 cri.go:89] found id: "47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e"
I1014 13:41:19.598380 8300 cri.go:89] found id: ""
I1014 13:41:19.598395 8300 logs.go:282] 1 containers: [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e]
I1014 13:41:19.598462 8300 ssh_runner.go:195] Run: which crictl
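The container-discovery steps above all follow one pattern: list every CRI container whose name matches a control-plane component and capture only the container ID with --quiet, then resolve crictl with `which`. A rough shell sketch of reproducing that lookup by hand, assuming the same `minikube -p addons-002422 ssh "<cmd>"` wrapper used elsewhere in this run (the loop itself is illustrative, not part of the test):

# Sketch only: repeat the per-component "crictl ps -a --quiet --name=<component>" calls seen above.
# The component list mirrors the ones queried in this log; the ssh wrapper is an assumption
# about how you would reach the node manually.
for c in kube-apiserver etcd coredns kube-scheduler kube-proxy kube-controller-manager kindnet; do
  echo "== $c =="
  minikube -p addons-002422 ssh "sudo crictl ps -a --quiet --name=$c"
done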
I1014 13:41:19.601905 8300 logs.go:123] Gathering logs for container status ...
I1014 13:41:19.601926 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1014 13:41:19.665148 8300 logs.go:123] Gathering logs for dmesg ...
I1014 13:41:19.665224 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1014 13:41:19.682907 8300 logs.go:123] Gathering logs for coredns [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f] ...
I1014 13:41:19.682933 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f"
I1014 13:41:19.687721 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:19.722161 8300 kapi.go:107] duration metric: took 1m16.503621459s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I1014 13:41:19.725432 8300 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-002422 cluster.
I1014 13:41:19.728014 8300 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I1014 13:41:19.730593 8300 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
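The three gcp-auth notices above describe the addon's opt-out mechanism: a pod that carries a label with the `gcp-auth-skip-secret` key does not get the credentials mounted. A minimal sketch of applying that label when creating a pod, assuming the hypothetical pod name `no-gcp-pod` and the label value `true` (neither appears in this log):

# Sketch only: create a pod that opts out of gcp-auth credential mounting.
# Keys off the "gcp-auth-skip-secret" label named in the notice above;
# pod name, image, and label value are illustrative placeholders.
kubectl --context addons-002422 run no-gcp-pod \
  --image=nginx \
  --labels=gcp-auth-skip-secret=true

# Per the last notice, pods created before the addon was enabled must be recreated
# (or the addon re-enabled with --refresh) for the mounting behavior to change.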
I1014 13:41:19.735037 8300 logs.go:123] Gathering logs for kube-proxy [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255] ...
I1014 13:41:19.735065 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255"
I1014 13:41:19.820881 8300 logs.go:123] Gathering logs for kube-controller-manager [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8] ...
I1014 13:41:19.820907 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8"
I1014 13:41:19.893884 8300 logs.go:123] Gathering logs for kindnet [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e] ...
I1014 13:41:19.893917 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e"
I1014 13:41:19.942315 8300 logs.go:123] Gathering logs for CRI-O ...
I1014 13:41:19.942345 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I1014 13:41:20.038083 8300 logs.go:123] Gathering logs for kubelet ...
I1014 13:41:20.038174 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1014 13:41:20.115418 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.630422 1493 reflector.go:561] object-"kube-system"/"coredns": failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-002422' and this object
W1014 13:41:20.115710 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.630469 1493 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"coredns\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"coredns\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:20.115919 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631414 1493 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-002422' and this object
W1014 13:41:20.116169 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631450 1493 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:20.116400 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631773 1493 reflector.go:561] object-"yakd-dashboard"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "yakd-dashboard": no relationship found between node 'addons-002422' and this object
W1014 13:41:20.116656 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631801 1493 reflector.go:158] "Unhandled Error" err="object-\"yakd-dashboard\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"yakd-dashboard\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
I1014 13:41:20.152836 8300 logs.go:123] Gathering logs for describe nodes ...
I1014 13:41:20.152914 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1014 13:41:20.186646 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:20.356145 8300 logs.go:123] Gathering logs for kube-apiserver [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74] ...
I1014 13:41:20.356173 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74"
I1014 13:41:20.412553 8300 logs.go:123] Gathering logs for etcd [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896] ...
I1014 13:41:20.412587 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896"
I1014 13:41:20.475037 8300 logs.go:123] Gathering logs for kube-scheduler [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8] ...
I1014 13:41:20.475086 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8"
I1014 13:41:20.544679 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:41:20.544710 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1014 13:41:20.545077 8300 out.go:270] X Problems detected in kubelet:
W1014 13:41:20.545098 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.630469 1493 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"coredns\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"coredns\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:20.545105 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631414 1493 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-002422' and this object
W1014 13:41:20.545119 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631450 1493 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:20.545254 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631773 1493 reflector.go:561] object-"yakd-dashboard"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "yakd-dashboard": no relationship found between node 'addons-002422' and this object
W1014 13:41:20.545261 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631801 1493 reflector.go:158] "Unhandled Error" err="object-\"yakd-dashboard\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"yakd-dashboard\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
I1014 13:41:20.545269 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:41:20.545282 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1014 13:41:20.686796 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:21.186158 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:21.686118 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:22.186247 8300 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I1014 13:41:22.685819 8300 kapi.go:107] duration metric: took 1m23.005216361s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I1014 13:41:22.688880 8300 out.go:177] * Enabled addons: nvidia-device-plugin, ingress-dns, cloud-spanner, amd-gpu-device-plugin, storage-provisioner, default-storageclass, storage-provisioner-rancher, inspektor-gadget, metrics-server, yakd, volumesnapshots, registry, ingress, gcp-auth, csi-hostpath-driver
I1014 13:41:22.691714 8300 addons.go:510] duration metric: took 1m30.184000639s for enable addons: enabled=[nvidia-device-plugin ingress-dns cloud-spanner amd-gpu-device-plugin storage-provisioner default-storageclass storage-provisioner-rancher inspektor-gadget metrics-server yakd volumesnapshots registry ingress gcp-auth csi-hostpath-driver]
I1014 13:41:30.546877 8300 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I1014 13:41:30.560342 8300 api_server.go:72] duration metric: took 1m38.053028566s to wait for apiserver process to appear ...
I1014 13:41:30.560367 8300 api_server.go:88] waiting for apiserver healthz status ...
I1014 13:41:30.560402 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1014 13:41:30.560461 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1014 13:41:30.601242 8300 cri.go:89] found id: "8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74"
I1014 13:41:30.601265 8300 cri.go:89] found id: ""
I1014 13:41:30.601273 8300 logs.go:282] 1 containers: [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74]
I1014 13:41:30.601326 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.604628 8300 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1014 13:41:30.604697 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1014 13:41:30.644984 8300 cri.go:89] found id: "1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896"
I1014 13:41:30.645003 8300 cri.go:89] found id: ""
I1014 13:41:30.645011 8300 logs.go:282] 1 containers: [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896]
I1014 13:41:30.645062 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.648469 8300 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1014 13:41:30.648536 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1014 13:41:30.697128 8300 cri.go:89] found id: "ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f"
I1014 13:41:30.697146 8300 cri.go:89] found id: ""
I1014 13:41:30.697153 8300 logs.go:282] 1 containers: [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f]
I1014 13:41:30.697205 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.700974 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1014 13:41:30.701035 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1014 13:41:30.740346 8300 cri.go:89] found id: "62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8"
I1014 13:41:30.740369 8300 cri.go:89] found id: ""
I1014 13:41:30.740376 8300 logs.go:282] 1 containers: [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8]
I1014 13:41:30.740429 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.743903 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1014 13:41:30.743969 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1014 13:41:30.783592 8300 cri.go:89] found id: "09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255"
I1014 13:41:30.783616 8300 cri.go:89] found id: ""
I1014 13:41:30.783624 8300 logs.go:282] 1 containers: [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255]
I1014 13:41:30.783677 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.787072 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1014 13:41:30.787151 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1014 13:41:30.823473 8300 cri.go:89] found id: "3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8"
I1014 13:41:30.823549 8300 cri.go:89] found id: ""
I1014 13:41:30.823572 8300 logs.go:282] 1 containers: [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8]
I1014 13:41:30.823651 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.827113 8300 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1014 13:41:30.827178 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1014 13:41:30.865127 8300 cri.go:89] found id: "47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e"
I1014 13:41:30.865151 8300 cri.go:89] found id: ""
I1014 13:41:30.865161 8300 logs.go:282] 1 containers: [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e]
I1014 13:41:30.865215 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:30.869618 8300 logs.go:123] Gathering logs for dmesg ...
I1014 13:41:30.869641 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1014 13:41:30.883538 8300 logs.go:123] Gathering logs for describe nodes ...
I1014 13:41:30.883565 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1014 13:41:31.015963 8300 logs.go:123] Gathering logs for kube-scheduler [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8] ...
I1014 13:41:31.015993 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8"
I1014 13:41:31.062622 8300 logs.go:123] Gathering logs for kindnet [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e] ...
I1014 13:41:31.062651 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e"
I1014 13:41:31.105622 8300 logs.go:123] Gathering logs for container status ...
I1014 13:41:31.105652 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1014 13:41:31.159051 8300 logs.go:123] Gathering logs for CRI-O ...
I1014 13:41:31.159121 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I1014 13:41:31.251884 8300 logs.go:123] Gathering logs for kubelet ...
I1014 13:41:31.251917 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1014 13:41:31.324970 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.630422 1493 reflector.go:561] object-"kube-system"/"coredns": failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-002422' and this object
W1014 13:41:31.325225 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.630469 1493 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"coredns\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"coredns\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:31.325410 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631414 1493 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-002422' and this object
W1014 13:41:31.325632 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631450 1493 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:31.325817 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631773 1493 reflector.go:561] object-"yakd-dashboard"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "yakd-dashboard": no relationship found between node 'addons-002422' and this object
W1014 13:41:31.326041 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631801 1493 reflector.go:158] "Unhandled Error" err="object-\"yakd-dashboard\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"yakd-dashboard\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
I1014 13:41:31.362482 8300 logs.go:123] Gathering logs for kube-apiserver [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74] ...
I1014 13:41:31.362509 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74"
I1014 13:41:31.418997 8300 logs.go:123] Gathering logs for etcd [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896] ...
I1014 13:41:31.419027 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896"
I1014 13:41:31.467919 8300 logs.go:123] Gathering logs for coredns [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f] ...
I1014 13:41:31.467949 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f"
I1014 13:41:31.507864 8300 logs.go:123] Gathering logs for kube-proxy [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255] ...
I1014 13:41:31.507894 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255"
I1014 13:41:31.548235 8300 logs.go:123] Gathering logs for kube-controller-manager [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8] ...
I1014 13:41:31.548260 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8"
I1014 13:41:31.618475 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:41:31.618509 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1014 13:41:31.618562 8300 out.go:270] X Problems detected in kubelet:
W1014 13:41:31.618572 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.630469 1493 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"coredns\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"coredns\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:31.618580 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631414 1493 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-002422' and this object
W1014 13:41:31.618593 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631450 1493 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:31.618601 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631773 1493 reflector.go:561] object-"yakd-dashboard"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "yakd-dashboard": no relationship found between node 'addons-002422' and this object
W1014 13:41:31.618611 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631801 1493 reflector.go:158] "Unhandled Error" err="object-\"yakd-dashboard\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"yakd-dashboard\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
I1014 13:41:31.618619 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:41:31.618625 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1014 13:41:41.619260 8300 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I1014 13:41:41.627573 8300 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I1014 13:41:41.628499 8300 api_server.go:141] control plane version: v1.31.1
I1014 13:41:41.628521 8300 api_server.go:131] duration metric: took 11.068146645s to wait for apiserver health ...
I1014 13:41:41.628529 8300 system_pods.go:43] waiting for kube-system pods to appear ...
I1014 13:41:41.628550 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I1014 13:41:41.628613 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I1014 13:41:41.665965 8300 cri.go:89] found id: "8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74"
I1014 13:41:41.665995 8300 cri.go:89] found id: ""
I1014 13:41:41.666002 8300 logs.go:282] 1 containers: [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74]
I1014 13:41:41.666056 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.669487 8300 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I1014 13:41:41.669557 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I1014 13:41:41.708562 8300 cri.go:89] found id: "1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896"
I1014 13:41:41.708585 8300 cri.go:89] found id: ""
I1014 13:41:41.708593 8300 logs.go:282] 1 containers: [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896]
I1014 13:41:41.708646 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.712178 8300 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I1014 13:41:41.712246 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I1014 13:41:41.775326 8300 cri.go:89] found id: "ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f"
I1014 13:41:41.775347 8300 cri.go:89] found id: ""
I1014 13:41:41.775355 8300 logs.go:282] 1 containers: [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f]
I1014 13:41:41.775408 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.779511 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I1014 13:41:41.779615 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I1014 13:41:41.821335 8300 cri.go:89] found id: "62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8"
I1014 13:41:41.821356 8300 cri.go:89] found id: ""
I1014 13:41:41.821363 8300 logs.go:282] 1 containers: [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8]
I1014 13:41:41.821450 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.825710 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I1014 13:41:41.825820 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I1014 13:41:41.865087 8300 cri.go:89] found id: "09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255"
I1014 13:41:41.865108 8300 cri.go:89] found id: ""
I1014 13:41:41.865116 8300 logs.go:282] 1 containers: [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255]
I1014 13:41:41.865169 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.868563 8300 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I1014 13:41:41.868634 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I1014 13:41:41.907304 8300 cri.go:89] found id: "3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8"
I1014 13:41:41.907327 8300 cri.go:89] found id: ""
I1014 13:41:41.907335 8300 logs.go:282] 1 containers: [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8]
I1014 13:41:41.907391 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.910857 8300 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I1014 13:41:41.910930 8300 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I1014 13:41:41.949718 8300 cri.go:89] found id: "47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e"
I1014 13:41:41.949744 8300 cri.go:89] found id: ""
I1014 13:41:41.949752 8300 logs.go:282] 1 containers: [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e]
I1014 13:41:41.949805 8300 ssh_runner.go:195] Run: which crictl
I1014 13:41:41.953310 8300 logs.go:123] Gathering logs for kindnet [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e] ...
I1014 13:41:41.953338 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e"
I1014 13:41:41.996585 8300 logs.go:123] Gathering logs for container status ...
I1014 13:41:41.996615 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I1014 13:41:42.050322 8300 logs.go:123] Gathering logs for kubelet ...
I1014 13:41:42.050352 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W1014 13:41:42.135143 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.630422 1493 reflector.go:561] object-"kube-system"/"coredns": failed to list *v1.ConfigMap: configmaps "coredns" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "kube-system": no relationship found between node 'addons-002422' and this object
W1014 13:41:42.135373 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.630469 1493 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"coredns\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"coredns\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:42.135558 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631414 1493 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-002422' and this object
W1014 13:41:42.135780 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631450 1493 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:42.135963 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631773 1493 reflector.go:561] object-"yakd-dashboard"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "yakd-dashboard": no relationship found between node 'addons-002422' and this object
W1014 13:41:42.136185 8300 logs.go:138] Found kubelet problem: Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631801 1493 reflector.go:158] "Unhandled Error" err="object-\"yakd-dashboard\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"yakd-dashboard\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
I1014 13:41:42.175445 8300 logs.go:123] Gathering logs for etcd [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896] ...
I1014 13:41:42.175490 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896"
I1014 13:41:42.232021 8300 logs.go:123] Gathering logs for kube-scheduler [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8] ...
I1014 13:41:42.232058 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8"
I1014 13:41:42.276952 8300 logs.go:123] Gathering logs for kube-proxy [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255] ...
I1014 13:41:42.276988 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255"
I1014 13:41:42.319634 8300 logs.go:123] Gathering logs for kube-controller-manager [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8] ...
I1014 13:41:42.319660 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8"
I1014 13:41:42.396472 8300 logs.go:123] Gathering logs for CRI-O ...
I1014 13:41:42.396507 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I1014 13:41:42.493405 8300 logs.go:123] Gathering logs for dmesg ...
I1014 13:41:42.493438 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I1014 13:41:42.505382 8300 logs.go:123] Gathering logs for describe nodes ...
I1014 13:41:42.505410 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I1014 13:41:42.639254 8300 logs.go:123] Gathering logs for kube-apiserver [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74] ...
I1014 13:41:42.639286 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74"
I1014 13:41:42.707467 8300 logs.go:123] Gathering logs for coredns [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f] ...
I1014 13:41:42.707498 8300 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f"
I1014 13:41:42.750023 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:41:42.750051 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
W1014 13:41:42.750110 8300 out.go:270] X Problems detected in kubelet:
W1014 13:41:42.750126 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.630469 1493 reflector.go:158] "Unhandled Error" err="object-\"kube-system\"/\"coredns\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"coredns\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:42.750135 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631414 1493 reflector.go:561] object-"gcp-auth"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "gcp-auth": no relationship found between node 'addons-002422' and this object
W1014 13:41:42.750146 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631450 1493 reflector.go:158] "Unhandled Error" err="object-\"gcp-auth\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"gcp-auth\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
W1014 13:41:42.750153 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: W1014 13:40:08.631773 1493 reflector.go:561] object-"yakd-dashboard"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-002422" cannot list resource "configmaps" in API group "" in the namespace "yakd-dashboard": no relationship found between node 'addons-002422' and this object
W1014 13:41:42.750205 8300 out.go:270] Oct 14 13:40:08 addons-002422 kubelet[1493]: E1014 13:40:08.631801 1493 reflector.go:158] "Unhandled Error" err="object-\"yakd-dashboard\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:addons-002422\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"yakd-dashboard\": no relationship found between node 'addons-002422' and this object" logger="UnhandledError"
I1014 13:41:42.750211 8300 out.go:358] Setting ErrFile to fd 2...
I1014 13:41:42.750218 8300 out.go:392] TERM=,COLORTERM=, which probably does not support color
I1014 13:41:52.761041 8300 system_pods.go:59] 18 kube-system pods found
I1014 13:41:52.761086 8300 system_pods.go:61] "coredns-7c65d6cfc9-bsnhb" [1719c402-d9cd-43d4-af23-a0333df02866] Running
I1014 13:41:52.761095 8300 system_pods.go:61] "csi-hostpath-attacher-0" [1e5df543-7e1e-48cb-9857-ad4fa55eecc3] Running
I1014 13:41:52.761101 8300 system_pods.go:61] "csi-hostpath-resizer-0" [3aacd79a-b371-4b56-bf98-d444c83b9439] Running
I1014 13:41:52.761128 8300 system_pods.go:61] "csi-hostpathplugin-jrvhl" [cd5f386d-cfc5-4dc6-9ec6-5643a4184f8c] Running
I1014 13:41:52.761139 8300 system_pods.go:61] "etcd-addons-002422" [055ec4e6-1017-4a4e-be4f-7a71bf7807a4] Running
I1014 13:41:52.761144 8300 system_pods.go:61] "kindnet-xjsm2" [e0634e3a-e89d-46c3-befa-fa9f56e48570] Running
I1014 13:41:52.761149 8300 system_pods.go:61] "kube-apiserver-addons-002422" [125f5bf2-9f9b-4b6f-b862-494aa9801820] Running
I1014 13:41:52.761153 8300 system_pods.go:61] "kube-controller-manager-addons-002422" [a31d6a59-7270-4061-92a4-5065ef2d5330] Running
I1014 13:41:52.761165 8300 system_pods.go:61] "kube-ingress-dns-minikube" [85b77aed-3ee1-4f75-97b3-879fb269f534] Running
I1014 13:41:52.761169 8300 system_pods.go:61] "kube-proxy-l8cm8" [c57ee3d5-8ab2-46bd-b68b-80f6c3904d40] Running
I1014 13:41:52.761174 8300 system_pods.go:61] "kube-scheduler-addons-002422" [1dc281ca-83cd-4762-9821-4e17445ccfea] Running
I1014 13:41:52.761180 8300 system_pods.go:61] "metrics-server-84c5f94fbc-p68nc" [344d0c1c-bbea-4de6-a079-724c18606d38] Running
I1014 13:41:52.761185 8300 system_pods.go:61] "nvidia-device-plugin-daemonset-tnngr" [a113dbce-1d95-437b-83fc-dd34499d10e4] Running
I1014 13:41:52.761210 8300 system_pods.go:61] "registry-66c9cd494c-ddkrt" [091b0f03-dc90-4b2b-bbd3-c73a13edd832] Running
I1014 13:41:52.761220 8300 system_pods.go:61] "registry-proxy-wjht4" [7f1138a2-5ec8-4c04-a3b7-fdb6c0af33aa] Running
I1014 13:41:52.761224 8300 system_pods.go:61] "snapshot-controller-56fcc65765-d9p5h" [272bc704-122e-4ffe-a624-e7051cb8832f] Running
I1014 13:41:52.761229 8300 system_pods.go:61] "snapshot-controller-56fcc65765-pq9xk" [c3e18049-be5f-43ff-a507-33cabb741de9] Running
I1014 13:41:52.761236 8300 system_pods.go:61] "storage-provisioner" [832679c2-ca50-4565-b1cd-90c63d11988b] Running
I1014 13:41:52.761243 8300 system_pods.go:74] duration metric: took 11.132707132s to wait for pod list to return data ...
I1014 13:41:52.761252 8300 default_sa.go:34] waiting for default service account to be created ...
I1014 13:41:52.763788 8300 default_sa.go:45] found service account: "default"
I1014 13:41:52.763813 8300 default_sa.go:55] duration metric: took 2.550674ms for default service account to be created ...
I1014 13:41:52.763822 8300 system_pods.go:116] waiting for k8s-apps to be running ...
I1014 13:41:52.773891 8300 system_pods.go:86] 18 kube-system pods found
I1014 13:41:52.773928 8300 system_pods.go:89] "coredns-7c65d6cfc9-bsnhb" [1719c402-d9cd-43d4-af23-a0333df02866] Running
I1014 13:41:52.773936 8300 system_pods.go:89] "csi-hostpath-attacher-0" [1e5df543-7e1e-48cb-9857-ad4fa55eecc3] Running
I1014 13:41:52.773941 8300 system_pods.go:89] "csi-hostpath-resizer-0" [3aacd79a-b371-4b56-bf98-d444c83b9439] Running
I1014 13:41:52.773969 8300 system_pods.go:89] "csi-hostpathplugin-jrvhl" [cd5f386d-cfc5-4dc6-9ec6-5643a4184f8c] Running
I1014 13:41:52.773981 8300 system_pods.go:89] "etcd-addons-002422" [055ec4e6-1017-4a4e-be4f-7a71bf7807a4] Running
I1014 13:41:52.773987 8300 system_pods.go:89] "kindnet-xjsm2" [e0634e3a-e89d-46c3-befa-fa9f56e48570] Running
I1014 13:41:52.773993 8300 system_pods.go:89] "kube-apiserver-addons-002422" [125f5bf2-9f9b-4b6f-b862-494aa9801820] Running
I1014 13:41:52.773997 8300 system_pods.go:89] "kube-controller-manager-addons-002422" [a31d6a59-7270-4061-92a4-5065ef2d5330] Running
I1014 13:41:52.774002 8300 system_pods.go:89] "kube-ingress-dns-minikube" [85b77aed-3ee1-4f75-97b3-879fb269f534] Running
I1014 13:41:52.774006 8300 system_pods.go:89] "kube-proxy-l8cm8" [c57ee3d5-8ab2-46bd-b68b-80f6c3904d40] Running
I1014 13:41:52.774012 8300 system_pods.go:89] "kube-scheduler-addons-002422" [1dc281ca-83cd-4762-9821-4e17445ccfea] Running
I1014 13:41:52.774017 8300 system_pods.go:89] "metrics-server-84c5f94fbc-p68nc" [344d0c1c-bbea-4de6-a079-724c18606d38] Running
I1014 13:41:52.774021 8300 system_pods.go:89] "nvidia-device-plugin-daemonset-tnngr" [a113dbce-1d95-437b-83fc-dd34499d10e4] Running
I1014 13:41:52.774024 8300 system_pods.go:89] "registry-66c9cd494c-ddkrt" [091b0f03-dc90-4b2b-bbd3-c73a13edd832] Running
I1014 13:41:52.774028 8300 system_pods.go:89] "registry-proxy-wjht4" [7f1138a2-5ec8-4c04-a3b7-fdb6c0af33aa] Running
I1014 13:41:52.774054 8300 system_pods.go:89] "snapshot-controller-56fcc65765-d9p5h" [272bc704-122e-4ffe-a624-e7051cb8832f] Running
I1014 13:41:52.774059 8300 system_pods.go:89] "snapshot-controller-56fcc65765-pq9xk" [c3e18049-be5f-43ff-a507-33cabb741de9] Running
I1014 13:41:52.774063 8300 system_pods.go:89] "storage-provisioner" [832679c2-ca50-4565-b1cd-90c63d11988b] Running
I1014 13:41:52.774071 8300 system_pods.go:126] duration metric: took 10.242384ms to wait for k8s-apps to be running ...
I1014 13:41:52.774078 8300 system_svc.go:44] waiting for kubelet service to be running ....
I1014 13:41:52.774154 8300 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I1014 13:41:52.786726 8300 system_svc.go:56] duration metric: took 12.638293ms WaitForService to wait for kubelet
I1014 13:41:52.786757 8300 kubeadm.go:582] duration metric: took 2m0.279448218s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I1014 13:41:52.786776 8300 node_conditions.go:102] verifying NodePressure condition ...
I1014 13:41:52.790212 8300 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I1014 13:41:52.790247 8300 node_conditions.go:123] node cpu capacity is 2
I1014 13:41:52.790259 8300 node_conditions.go:105] duration metric: took 3.477745ms to run NodePressure ...
I1014 13:41:52.790270 8300 start.go:241] waiting for startup goroutines ...
I1014 13:41:52.790278 8300 start.go:246] waiting for cluster config update ...
I1014 13:41:52.790293 8300 start.go:255] writing updated cluster config ...
I1014 13:41:52.790588 8300 ssh_runner.go:195] Run: rm -f paused
I1014 13:41:53.192225 8300 start.go:600] kubectl: 1.31.1, cluster: 1.31.1 (minor skew: 0)
I1014 13:41:53.193794 8300 out.go:177] * Done! kubectl is now configured to use "addons-002422" cluster and "default" namespace by default
==> CRI-O <==
Oct 14 13:43:47 addons-002422 crio[969]: time="2024-10-14 13:43:47.541718070Z" level=info msg="Removed pod sandbox: d383f386ecc93958e1322521c4dbeef31daa26ac017a25b2bb5d1ef706166ac7" id=0f575695-35bf-4a06-a9f4-7f65a887462a name=/runtime.v1.RuntimeService/RemovePodSandbox
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.193915572Z" level=info msg="Running pod sandbox: default/hello-world-app-55bf9c44b4-pfhmd/POD" id=abaabee4-b904-4b27-b1b9-111ed933bd80 name=/runtime.v1.RuntimeService/RunPodSandbox
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.193978218Z" level=warning msg="Allowed annotations are specified for workload []"
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.228778918Z" level=info msg="Got pod network &{Name:hello-world-app-55bf9c44b4-pfhmd Namespace:default ID:fa1f79a1ddb533a93bd1615336d47515834052a4a2f5510ad35fcec5702e0eee UID:f5f83fdb-25be-40d5-9d3f-0e790983e8df NetNS:/var/run/netns/cac0241f-1b6b-48d7-a019-c38afe37bcf4 Networks:[] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.228818237Z" level=info msg="Adding pod default_hello-world-app-55bf9c44b4-pfhmd to CNI network \"kindnet\" (type=ptp)"
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.249390872Z" level=info msg="Got pod network &{Name:hello-world-app-55bf9c44b4-pfhmd Namespace:default ID:fa1f79a1ddb533a93bd1615336d47515834052a4a2f5510ad35fcec5702e0eee UID:f5f83fdb-25be-40d5-9d3f-0e790983e8df NetNS:/var/run/netns/cac0241f-1b6b-48d7-a019-c38afe37bcf4 Networks:[] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.249540131Z" level=info msg="Checking pod default_hello-world-app-55bf9c44b4-pfhmd for CNI network kindnet (type=ptp)"
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.252019486Z" level=info msg="Ran pod sandbox fa1f79a1ddb533a93bd1615336d47515834052a4a2f5510ad35fcec5702e0eee with infra container: default/hello-world-app-55bf9c44b4-pfhmd/POD" id=abaabee4-b904-4b27-b1b9-111ed933bd80 name=/runtime.v1.RuntimeService/RunPodSandbox
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.253322557Z" level=info msg="Checking image status: docker.io/kicbase/echo-server:1.0" id=5cc71eaa-6e49-4e1c-a8ef-bd4fefb48850 name=/runtime.v1.ImageService/ImageStatus
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.253534216Z" level=info msg="Image docker.io/kicbase/echo-server:1.0 not found" id=5cc71eaa-6e49-4e1c-a8ef-bd4fefb48850 name=/runtime.v1.ImageService/ImageStatus
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.255982982Z" level=info msg="Pulling image: docker.io/kicbase/echo-server:1.0" id=b57293eb-f1dc-494b-ba06-283415c28bec name=/runtime.v1.ImageService/PullImage
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.262984477Z" level=info msg="Trying to access \"docker.io/kicbase/echo-server:1.0\""
Oct 14 13:45:57 addons-002422 crio[969]: time="2024-10-14 13:45:57.595497646Z" level=info msg="Trying to access \"docker.io/kicbase/echo-server:1.0\""
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.399940790Z" level=info msg="Pulled image: docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6" id=b57293eb-f1dc-494b-ba06-283415c28bec name=/runtime.v1.ImageService/PullImage
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.400547722Z" level=info msg="Checking image status: docker.io/kicbase/echo-server:1.0" id=e7dfb89f-6ea9-4128-b341-3af5f145fe33 name=/runtime.v1.ImageService/ImageStatus
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.401354391Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17,RepoTags:[docker.io/kicbase/echo-server:1.0],RepoDigests:[docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 docker.io/kicbase/echo-server@sha256:42a89d9b22e5307cb88494990d5d929c401339f508c0a7e98a4d8ac52623fc5b],Size_:4789170,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=e7dfb89f-6ea9-4128-b341-3af5f145fe33 name=/runtime.v1.ImageService/ImageStatus
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.402388908Z" level=info msg="Checking image status: docker.io/kicbase/echo-server:1.0" id=e129ca63-0d6f-424d-b114-f03c593d7bff name=/runtime.v1.ImageService/ImageStatus
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.403001099Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:ce2d2cda2d858fdaea84129deb86d18e5dbf1c548f230b79fdca74cc91729d17,RepoTags:[docker.io/kicbase/echo-server:1.0],RepoDigests:[docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 docker.io/kicbase/echo-server@sha256:42a89d9b22e5307cb88494990d5d929c401339f508c0a7e98a4d8ac52623fc5b],Size_:4789170,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=e129ca63-0d6f-424d-b114-f03c593d7bff name=/runtime.v1.ImageService/ImageStatus
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.403990923Z" level=info msg="Creating container: default/hello-world-app-55bf9c44b4-pfhmd/hello-world-app" id=6109060d-3776-49af-9938-89ab1834929f name=/runtime.v1.RuntimeService/CreateContainer
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.404081713Z" level=warning msg="Allowed annotations are specified for workload []"
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.428848046Z" level=warning msg="Failed to open /etc/passwd: open /var/lib/containers/storage/overlay/4fae11728443a26f00bc8a2f06c7b01a56fcbf4511fa08206c8044408341461f/merged/etc/passwd: no such file or directory"
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.429019541Z" level=warning msg="Failed to open /etc/group: open /var/lib/containers/storage/overlay/4fae11728443a26f00bc8a2f06c7b01a56fcbf4511fa08206c8044408341461f/merged/etc/group: no such file or directory"
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.484416310Z" level=info msg="Created container 7581e29d62f8c054b9ddfde6c9962368ba0dc67a16ba8b602e15d90fdcda758f: default/hello-world-app-55bf9c44b4-pfhmd/hello-world-app" id=6109060d-3776-49af-9938-89ab1834929f name=/runtime.v1.RuntimeService/CreateContainer
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.485404698Z" level=info msg="Starting container: 7581e29d62f8c054b9ddfde6c9962368ba0dc67a16ba8b602e15d90fdcda758f" id=8ca58d98-f760-4f4d-8c58-6e4667b27a2f name=/runtime.v1.RuntimeService/StartContainer
Oct 14 13:45:58 addons-002422 crio[969]: time="2024-10-14 13:45:58.505374298Z" level=info msg="Started container" PID=8367 containerID=7581e29d62f8c054b9ddfde6c9962368ba0dc67a16ba8b602e15d90fdcda758f description=default/hello-world-app-55bf9c44b4-pfhmd/hello-world-app id=8ca58d98-f760-4f4d-8c58-6e4667b27a2f name=/runtime.v1.RuntimeService/StartContainer sandboxID=fa1f79a1ddb533a93bd1615336d47515834052a4a2f5510ad35fcec5702e0eee
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
7581e29d62f8c docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 Less than a second ago Running hello-world-app 0 fa1f79a1ddb53 hello-world-app-55bf9c44b4-pfhmd
a7421d31433bb docker.io/library/nginx@sha256:2140dad235c130ac861018a4e13a6bc8aea3a35f3a40e20c1b060d51a7efd250 2 minutes ago Running nginx 0 35c7a64d1ead9 nginx
0a3873b6a1313 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 4 minutes ago Running busybox 0 0b9b34a5ff6d3 busybox
0116313dbe028 registry.k8s.io/ingress-nginx/controller@sha256:787a5408fa511266888b2e765f9666bee67d9bf2518a6b7cfd4ab6cc01c22eee 4 minutes ago Running controller 0 36d43cb197cae ingress-nginx-controller-5f85ff4588-2wmx4
cdeac225d13a4 docker.io/marcnuri/yakd@sha256:1c961556224d57fc747de0b1874524208e5fb4f8386f23e9c1c4c18e97109f17 5 minutes ago Running yakd 0 089c79855b00d yakd-dashboard-67d98fc6b-qgctz
a127dc0621a8c nvcr.io/nvidia/k8s-device-plugin@sha256:cdd05f9d89f0552478d46474005e86b98795ad364664f644225b99d94978e680 5 minutes ago Running nvidia-device-plugin-ctr 0 ea8114682b7cb nvidia-device-plugin-daemonset-tnngr
393d92e1891bd docker.io/rancher/local-path-provisioner@sha256:689a2489a24e74426e4a4666e611c988202c5fa995908b0c60133aca3eb87d98 5 minutes ago Running local-path-provisioner 0 3c6acef92b0a2 local-path-provisioner-86d989889c-8sdx4
d7554a361bf43 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:0550b75a965592f1dde3fbeaa98f67a1e10c5a086bcd69a29054cc4edcb56771 5 minutes ago Exited patch 0 d9afc9d8f05ea ingress-nginx-admission-patch-m2gmb
4bba5dbe92734 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:0550b75a965592f1dde3fbeaa98f67a1e10c5a086bcd69a29054cc4edcb56771 5 minutes ago Exited create 0 11524833ee68f ingress-nginx-admission-create-wp9ww
bd13935951c31 gcr.io/k8s-minikube/minikube-ingress-dns@sha256:4211a1de532376c881851542238121b26792225faa36a7b02dccad88fd05797c 5 minutes ago Running minikube-ingress-dns 0 f02059b197e72 kube-ingress-dns-minikube
fa367e6127e27 gcr.io/cloud-spanner-emulator/emulator@sha256:6ce1265c73355797b34d2531c7146eed3996346f860517e35d1434182eb5f01d 5 minutes ago Running cloud-spanner-emulator 0 f8152ffb6c4a3 cloud-spanner-emulator-5b584cc74-fwt5t
57a5d29f5a270 registry.k8s.io/metrics-server/metrics-server@sha256:048bcf48fc2cce517a61777e22bac782ba59ea5e9b9a54bcb42dbee99566a91f 5 minutes ago Running metrics-server 0 f5e4a601392aa metrics-server-84c5f94fbc-p68nc
ada184f93dd5b 2f6c962e7b8311337352d9fdea917da2184d9919f4da7695bc2a6517cf392fe4 5 minutes ago Running coredns 0 daba31545a435 coredns-7c65d6cfc9-bsnhb
749f7ebdaeaf5 ba04bb24b95753201135cbc420b233c1b0b9fa2e1fd21d28319c348c33fbcde6 5 minutes ago Running storage-provisioner 0 1c28befd43fbe storage-provisioner
47e55f64e180f docker.io/kindest/kindnetd@sha256:a454aa48d8e10631411378503103b251e3f52856d8be2535efb73a92fa2c0387 6 minutes ago Running kindnet-cni 0 d3f853ecbc8ad kindnet-xjsm2
09ddfab546738 24a140c548c075e487e45d0ee73b1aa89f8bfb40c08a57e05975559728822b1d 6 minutes ago Running kube-proxy 0 8d6d9e6d67223 kube-proxy-l8cm8
1028165ec0621 27e3830e1402783674d8b594038967deea9d51f0d91b34c93c8f39d2f68af7da 6 minutes ago Running etcd 0 04b6b690c81f9 etcd-addons-002422
62098d1172497 7f8aa378bb47dffcf430f3a601abe39137e88aee0238e23ed8530fdd18dab82d 6 minutes ago Running kube-scheduler 0 1d827eff7713c kube-scheduler-addons-002422
3e4cf70c88184 279f381cb37365bbbcd133c9531fba9c2beb0f38dbbe6ddfcd0b1b1643d3450e 6 minutes ago Running kube-controller-manager 0 76c74a21d4af4 kube-controller-manager-addons-002422
8b5eecbb1fe82 d3f53a98c0a9d9163c4848bcf34b2d2f5e1e3691b79f3d1dd6d0206809e02853 6 minutes ago Running kube-apiserver 0 93ae1f0de0f96 kube-apiserver-addons-002422
==> coredns [ada184f93dd5b79075f6ed44ddcb44b635fe24c3c69ea498e797aabea7f5ee5f] <==
[INFO] 10.244.0.8:44372 - 27010 "AAAA IN registry.kube-system.svc.cluster.local.us-east-2.compute.internal. udp 94 false 1232" NXDOMAIN qr,rd,ra 83 0.002586112s
[INFO] 10.244.0.8:44372 - 19887 "A IN registry.kube-system.svc.cluster.local. udp 67 false 1232" NOERROR qr,aa,rd 110 0.000242518s
[INFO] 10.244.0.8:44372 - 3863 "AAAA IN registry.kube-system.svc.cluster.local. udp 67 false 1232" NOERROR qr,aa,rd 149 0.000153181s
[INFO] 10.244.0.8:39157 - 45011 "A IN registry.kube-system.svc.cluster.local.kube-system.svc.cluster.local. udp 86 false 512" NXDOMAIN qr,aa,rd 179 0.000118949s
[INFO] 10.244.0.8:39157 - 45207 "AAAA IN registry.kube-system.svc.cluster.local.kube-system.svc.cluster.local. udp 86 false 512" NXDOMAIN qr,aa,rd 179 0.000082117s
[INFO] 10.244.0.8:34360 - 3935 "A IN registry.kube-system.svc.cluster.local.svc.cluster.local. udp 74 false 512" NXDOMAIN qr,aa,rd 167 0.000062038s
[INFO] 10.244.0.8:34360 - 4124 "AAAA IN registry.kube-system.svc.cluster.local.svc.cluster.local. udp 74 false 512" NXDOMAIN qr,aa,rd 167 0.00015497s
[INFO] 10.244.0.8:60782 - 1691 "AAAA IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000054736s
[INFO] 10.244.0.8:60782 - 1519 "A IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000059619s
[INFO] 10.244.0.8:37523 - 7894 "AAAA IN registry.kube-system.svc.cluster.local.us-east-2.compute.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.001638307s
[INFO] 10.244.0.8:37523 - 7433 "A IN registry.kube-system.svc.cluster.local.us-east-2.compute.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.00171468s
[INFO] 10.244.0.8:45406 - 63109 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000089871s
[INFO] 10.244.0.8:45406 - 63265 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000062466s
[INFO] 10.244.0.21:57286 - 54240 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000155766s
[INFO] 10.244.0.21:34847 - 43639 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000185936s
[INFO] 10.244.0.21:41613 - 34227 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.002258096s
[INFO] 10.244.0.21:48605 - 55375 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000334095s
[INFO] 10.244.0.21:57425 - 41543 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000180587s
[INFO] 10.244.0.21:35525 - 24386 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000100668s
[INFO] 10.244.0.21:48141 - 20043 "AAAA IN storage.googleapis.com.us-east-2.compute.internal. udp 78 false 1232" NXDOMAIN qr,rd,ra 67 0.0031399s
[INFO] 10.244.0.21:47055 - 2655 "A IN storage.googleapis.com.us-east-2.compute.internal. udp 78 false 1232" NXDOMAIN qr,rd,ra 67 0.003316458s
[INFO] 10.244.0.21:37278 - 54331 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.000875584s
[INFO] 10.244.0.21:36351 - 2614 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 648 0.00160556s
[INFO] 10.244.0.24:58786 - 2 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000197833s
[INFO] 10.244.0.24:33286 - 3 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000127195s
==> describe nodes <==
Name: addons-002422
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-002422
kubernetes.io/os=linux
minikube.k8s.io/commit=f9f6c2ada6d933af9900f45012fe0fe625736c5b
minikube.k8s.io/name=addons-002422
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_10_14T13_39_48_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-002422
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 14 Oct 2024 13:39:44 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-002422
AcquireTime: <unset>
RenewTime: Mon, 14 Oct 2024 13:45:54 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 14 Oct 2024 13:43:52 +0000 Mon, 14 Oct 2024 13:39:41 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 14 Oct 2024 13:43:52 +0000 Mon, 14 Oct 2024 13:39:41 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 14 Oct 2024 13:43:52 +0000 Mon, 14 Oct 2024 13:39:41 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 14 Oct 2024 13:43:52 +0000 Mon, 14 Oct 2024 13:40:08 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-002422
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: 216d99f7dc424e599d6a70e41b29e088
System UUID: 51be1b84-8333-4024-a862-c04d66a5271b
Boot ID: c1fb5e99-d9c3-4e62-b114-4b2c9a33f58a
Kernel Version: 5.15.0-1070-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: cri-o://1.24.6
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (18 in total)
Namespace            Name                                         CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------            ----                                         ------------  ----------  ---------------  -------------  ---
default              busybox                                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         4m6s
default              cloud-spanner-emulator-5b584cc74-fwt5t       0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m3s
default              hello-world-app-55bf9c44b4-pfhmd             0 (0%)        0 (0%)      0 (0%)           0 (0%)         3s
default              nginx                                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         2m22s
ingress-nginx        ingress-nginx-controller-5f85ff4588-2wmx4    100m (5%)     0 (0%)      90Mi (1%)        0 (0%)         6m
kube-system          coredns-7c65d6cfc9-bsnhb                     100m (5%)     0 (0%)      70Mi (0%)        170Mi (2%)     6m7s
kube-system          etcd-addons-002422                           100m (5%)     0 (0%)      100Mi (1%)       0 (0%)         6m12s
kube-system          kindnet-xjsm2                                100m (5%)     100m (5%)   50Mi (0%)        50Mi (0%)      6m8s
kube-system          kube-apiserver-addons-002422                 250m (12%)    0 (0%)      0 (0%)           0 (0%)         6m12s
kube-system          kube-controller-manager-addons-002422        200m (10%)    0 (0%)      0 (0%)           0 (0%)         6m12s
kube-system          kube-ingress-dns-minikube                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m2s
kube-system          kube-proxy-l8cm8                             0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m8s
kube-system          kube-scheduler-addons-002422                 100m (5%)     0 (0%)      0 (0%)           0 (0%)         6m12s
kube-system          metrics-server-84c5f94fbc-p68nc              100m (5%)     0 (0%)      200Mi (2%)       0 (0%)         6m2s
kube-system          nvidia-device-plugin-daemonset-tnngr         0 (0%)        0 (0%)      0 (0%)           0 (0%)         5m51s
kube-system          storage-provisioner                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m1s
local-path-storage   local-path-provisioner-86d989889c-8sdx4      0 (0%)        0 (0%)      0 (0%)           0 (0%)         6m1s
yakd-dashboard       yakd-dashboard-67d98fc6b-qgctz               0 (0%)        0 (0%)      128Mi (1%)       256Mi (3%)     6m1s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests     Limits
--------           --------     ------
cpu                1050m (52%)  100m (5%)
memory             638Mi (8%)   476Mi (6%)
ephemeral-storage  0 (0%)       0 (0%)
hugepages-1Gi      0 (0%)       0 (0%)
hugepages-2Mi      0 (0%)       0 (0%)
hugepages-32Mi     0 (0%)       0 (0%)
hugepages-64Ki     0 (0%)       0 (0%)
Events:
Type     Reason                   Age                    From             Message
----     ------                   ----                   ----             -------
Normal   Starting                 6m6s                   kube-proxy
Normal   NodeHasSufficientMemory  6m19s (x8 over 6m19s)  kubelet          Node addons-002422 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    6m19s (x8 over 6m19s)  kubelet          Node addons-002422 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     6m19s (x7 over 6m19s)  kubelet          Node addons-002422 status is now: NodeHasSufficientPID
Normal   Starting                 6m12s                  kubelet          Starting kubelet.
Warning  CgroupV1                 6m12s                  kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal   NodeHasSufficientMemory  6m12s (x2 over 6m12s)  kubelet          Node addons-002422 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    6m12s (x2 over 6m12s)  kubelet          Node addons-002422 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     6m12s (x2 over 6m12s)  kubelet          Node addons-002422 status is now: NodeHasSufficientPID
Normal   RegisteredNode           6m8s                   node-controller  Node addons-002422 event: Registered Node addons-002422 in Controller
Normal   NodeReady                5m51s                  kubelet          Node addons-002422 status is now: NodeReady
==> dmesg <==
[Oct14 13:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014835] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.475618] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.053479] systemd[1]: /lib/systemd/system/cloud-init.service:20: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.015843] systemd[1]: /lib/systemd/system/cloud-init-hotplugd.socket:11: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.695923] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +5.686422] kauditd_printk_skb: 34 callbacks suppressed
==> etcd [1028165ec062157439f733ab6a35f8de542a7bec1f3b417ae6d993ec6d72f896] <==
{"level":"info","ts":"2024-10-14T13:39:41.673517Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-10-14T13:39:41.674471Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-10-14T13:39:41.677088Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-10-14T13:39:41.677213Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-10-14T13:39:41.677265Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-10-14T13:39:41.746155Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-10-14T13:39:55.704344Z","caller":"traceutil/trace.go:171","msg":"trace[1703535512] transaction","detail":"{read_only:false; response_revision:398; number_of_response:1; }","duration":"125.061507ms","start":"2024-10-14T13:39:55.579265Z","end":"2024-10-14T13:39:55.704327Z","steps":["trace[1703535512] 'process raft request' (duration: 100.74032ms)","trace[1703535512] 'compare' (duration: 24.075148ms)"],"step_count":2}
{"level":"warn","ts":"2024-10-14T13:39:55.709244Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"104.350627ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-10-14T13:39:55.742502Z","caller":"traceutil/trace.go:171","msg":"trace[1742711474] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:398; }","duration":"137.617824ms","start":"2024-10-14T13:39:55.604866Z","end":"2024-10-14T13:39:55.742484Z","steps":["trace[1742711474] 'agreement among raft nodes before linearized reading' (duration: 104.309799ms)"],"step_count":1}
{"level":"warn","ts":"2024-10-14T13:39:55.709463Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"104.395559ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:351"}
{"level":"info","ts":"2024-10-14T13:39:55.743038Z","caller":"traceutil/trace.go:171","msg":"trace[1917448555] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:400; }","duration":"137.96655ms","start":"2024-10-14T13:39:55.605059Z","end":"2024-10-14T13:39:55.743026Z","steps":["trace[1917448555] 'agreement among raft nodes before linearized reading' (duration: 104.369401ms)"],"step_count":1}
{"level":"warn","ts":"2024-10-14T13:39:56.559335Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"101.136832ms","expected-duration":"100ms","prefix":"","request":"header:<ID:8128032554294518971 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/daemonsets/kube-system/nvidia-device-plugin-daemonset\" mod_revision:399 > success:<request_put:<key:\"/registry/daemonsets/kube-system/nvidia-device-plugin-daemonset\" value_size:3174 >> failure:<request_range:<key:\"/registry/daemonsets/kube-system/nvidia-device-plugin-daemonset\" > >>","response":"size:16"}
{"level":"info","ts":"2024-10-14T13:39:56.567126Z","caller":"traceutil/trace.go:171","msg":"trace[696180709] transaction","detail":"{read_only:false; response_revision:409; number_of_response:1; }","duration":"201.90427ms","start":"2024-10-14T13:39:56.365204Z","end":"2024-10-14T13:39:56.567108Z","steps":["trace[696180709] 'process raft request' (duration: 89.922009ms)","trace[696180709] 'compare' (duration: 101.057432ms)"],"step_count":2}
{"level":"info","ts":"2024-10-14T13:39:56.567433Z","caller":"traceutil/trace.go:171","msg":"trace[208790586] transaction","detail":"{read_only:false; response_revision:410; number_of_response:1; }","duration":"202.128434ms","start":"2024-10-14T13:39:56.365293Z","end":"2024-10-14T13:39:56.567421Z","steps":["trace[208790586] 'process raft request' (duration: 194.656711ms)"],"step_count":1}
{"level":"info","ts":"2024-10-14T13:39:56.567731Z","caller":"traceutil/trace.go:171","msg":"trace[2018964444] transaction","detail":"{read_only:false; response_revision:411; number_of_response:1; }","duration":"202.214382ms","start":"2024-10-14T13:39:56.365509Z","end":"2024-10-14T13:39:56.567723Z","steps":["trace[2018964444] 'process raft request' (duration: 194.531967ms)"],"step_count":1}
{"level":"info","ts":"2024-10-14T13:39:56.567891Z","caller":"traceutil/trace.go:171","msg":"trace[1274013744] transaction","detail":"{read_only:false; response_revision:412; number_of_response:1; }","duration":"195.842423ms","start":"2024-10-14T13:39:56.372041Z","end":"2024-10-14T13:39:56.567884Z","steps":["trace[1274013744] 'process raft request' (duration: 188.046179ms)"],"step_count":1}
{"level":"info","ts":"2024-10-14T13:39:56.567917Z","caller":"traceutil/trace.go:171","msg":"trace[1855707437] transaction","detail":"{read_only:false; response_revision:413; number_of_response:1; }","duration":"195.803604ms","start":"2024-10-14T13:39:56.372108Z","end":"2024-10-14T13:39:56.567912Z","steps":["trace[1855707437] 'process raft request' (duration: 188.006999ms)"],"step_count":1}
{"level":"warn","ts":"2024-10-14T13:39:56.568946Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"161.89416ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/apiregistration.k8s.io/apiservices/v1beta1.metrics.k8s.io\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-10-14T13:39:56.601194Z","caller":"traceutil/trace.go:171","msg":"trace[345260323] range","detail":"{range_begin:/registry/apiregistration.k8s.io/apiservices/v1beta1.metrics.k8s.io; range_end:; response_count:0; response_revision:413; }","duration":"195.413853ms","start":"2024-10-14T13:39:56.405766Z","end":"2024-10-14T13:39:56.601180Z","steps":["trace[345260323] 'agreement among raft nodes before linearized reading' (duration: 161.872631ms)"],"step_count":1}
{"level":"info","ts":"2024-10-14T13:39:56.567358Z","caller":"traceutil/trace.go:171","msg":"trace[1219537254] linearizableReadLoop","detail":"{readStateIndex:426; appliedIndex:420; }","duration":"161.573333ms","start":"2024-10-14T13:39:56.405771Z","end":"2024-10-14T13:39:56.567344Z","steps":["trace[1219537254] 'read index received' (duration: 6.757042ms)","trace[1219537254] 'applied index is now lower than readState.Index' (duration: 154.814182ms)"],"step_count":2}
{"level":"info","ts":"2024-10-14T13:39:56.569116Z","caller":"traceutil/trace.go:171","msg":"trace[1142324761] transaction","detail":"{read_only:false; response_revision:414; number_of_response:1; }","duration":"143.450437ms","start":"2024-10-14T13:39:56.425656Z","end":"2024-10-14T13:39:56.569106Z","steps":["trace[1142324761] 'process raft request' (duration: 142.359271ms)"],"step_count":1}
{"level":"warn","ts":"2024-10-14T13:39:56.614133Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"159.027147ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/addons-002422\" ","response":"range_response_count:1 size:5745"}
{"level":"info","ts":"2024-10-14T13:39:56.614713Z","caller":"traceutil/trace.go:171","msg":"trace[111109841] range","detail":"{range_begin:/registry/minions/addons-002422; range_end:; response_count:1; response_revision:418; }","duration":"159.613731ms","start":"2024-10-14T13:39:56.455086Z","end":"2024-10-14T13:39:56.614700Z","steps":["trace[111109841] 'agreement among raft nodes before linearized reading' (duration: 159.001309ms)"],"step_count":1}
{"level":"warn","ts":"2024-10-14T13:39:56.614968Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"189.299264ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/local-path-storage\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-10-14T13:39:56.616701Z","caller":"traceutil/trace.go:171","msg":"trace[633126474] range","detail":"{range_begin:/registry/namespaces/local-path-storage; range_end:; response_count:0; response_revision:418; }","duration":"191.08861ms","start":"2024-10-14T13:39:56.425600Z","end":"2024-10-14T13:39:56.616689Z","steps":["trace[633126474] 'agreement among raft nodes before linearized reading' (duration: 189.273172ms)"],"step_count":1}
==> kernel <==
13:45:59 up 28 min, 0 users, load average: 0.03, 0.57, 0.41
Linux addons-002422 5.15.0-1070-aws #76~20.04.1-Ubuntu SMP Mon Sep 2 12:20:48 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kindnet [47e55f64e180ffb927512f85e202bb19ab2a989edeae9f3711eb8b4b9204e17e] <==
I1014 13:43:58.444252 1 main.go:300] handling current node
I1014 13:44:08.449096 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:44:08.449129 1 main.go:300] handling current node
I1014 13:44:18.449666 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:44:18.449699 1 main.go:300] handling current node
I1014 13:44:28.449117 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:44:28.449151 1 main.go:300] handling current node
I1014 13:44:38.449068 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:44:38.449103 1 main.go:300] handling current node
I1014 13:44:48.449985 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:44:48.450019 1 main.go:300] handling current node
I1014 13:44:58.444557 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:44:58.444586 1 main.go:300] handling current node
I1014 13:45:08.443975 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:45:08.444087 1 main.go:300] handling current node
I1014 13:45:18.445388 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:45:18.445423 1 main.go:300] handling current node
I1014 13:45:28.444692 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:45:28.444760 1 main.go:300] handling current node
I1014 13:45:38.448883 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:45:38.448922 1 main.go:300] handling current node
I1014 13:45:48.450748 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:45:48.450781 1 main.go:300] handling current node
I1014 13:45:58.444037 1 main.go:296] Handling node with IPs: map[192.168.49.2:{}]
I1014 13:45:58.444074 1 main.go:300] handling current node
==> kube-apiserver [8b5eecbb1fe82d3ce49c4e32d7e54fd8dad0e826a894d6005ed7aac0c04bef74] <==
E1014 13:41:19.024702 1 controller.go:146] "Unhandled Error" err=<
Error updating APIService "v1beta1.metrics.k8s.io" with err: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
> logger="UnhandledError"
I1014 13:41:19.102837 1 handler.go:286] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
E1014 13:42:05.216035 1 conn.go:339] Error on socket receive: read tcp 192.168.49.2:8443->192.168.49.1:44098: use of closed network connection
E1014 13:42:05.455514 1 conn.go:339] Error on socket receive: read tcp 192.168.49.2:8443->192.168.49.1:44118: use of closed network connection
I1014 13:42:14.866585 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.107.229.9"}
I1014 13:43:03.063091 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I1014 13:43:17.774362 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1014 13:43:17.774425 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1014 13:43:17.844707 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1014 13:43:17.844880 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1014 13:43:17.905822 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1014 13:43:17.905940 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I1014 13:43:17.942397 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I1014 13:43:17.942432 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W1014 13:43:18.908004 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W1014 13:43:18.942558 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
W1014 13:43:19.035704 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
I1014 13:43:31.519357 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W1014 13:43:32.552131 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I1014 13:43:37.064523 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I1014 13:43:37.356768 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.101.40.124"}
I1014 13:45:57.249198 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.98.166.184"}
==> kube-controller-manager [3e4cf70c881841234f19b88ecc5497bac13aae34c6605d6a448de2ce998ca7a8] <==
W1014 13:44:03.814541 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:44:03.814582 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:44:22.619399 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:44:22.619441 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:44:35.933961 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:44:35.934003 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:44:37.853292 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:44:37.853334 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:44:48.863090 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:44:48.863130 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:45:19.551214 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:45:19.551254 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:45:22.399240 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:45:22.399279 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:45:29.938406 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:45:29.938535 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:45:33.936809 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:45:33.936849 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W1014 13:45:51.613868 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E1014 13:45:51.613913 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I1014 13:45:56.886501 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="33.430619ms"
I1014 13:45:56.912545 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="25.922441ms"
I1014 13:45:56.912699 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="44.48µs"
I1014 13:45:59.206747 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="14.463839ms"
I1014 13:45:59.206831 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="49.518µs"
==> kube-proxy [09ddfab546738d1eab72c46ef4b7d84c7c88574b12d387c037cceeaf1a909255] <==
I1014 13:39:52.294288 1 server_linux.go:66] "Using iptables proxy"
I1014 13:39:52.394712 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E1014 13:39:52.394871 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I1014 13:39:52.421809 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I1014 13:39:52.421919 1 server_linux.go:169] "Using iptables Proxier"
I1014 13:39:52.425428 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I1014 13:39:52.439398 1 server.go:483] "Version info" version="v1.31.1"
I1014 13:39:52.439423 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I1014 13:39:52.440582 1 config.go:199] "Starting service config controller"
I1014 13:39:52.440648 1 shared_informer.go:313] Waiting for caches to sync for service config
I1014 13:39:52.444864 1 config.go:105] "Starting endpoint slice config controller"
I1014 13:39:52.444953 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I1014 13:39:52.445458 1 config.go:328] "Starting node config controller"
I1014 13:39:52.445546 1 shared_informer.go:313] Waiting for caches to sync for node config
I1014 13:39:52.548284 1 shared_informer.go:320] Caches are synced for node config
I1014 13:39:52.548384 1 shared_informer.go:320] Caches are synced for service config
I1014 13:39:52.548437 1 shared_informer.go:320] Caches are synced for endpoint slice config
==> kube-scheduler [62098d11724974c824d47af9ef75592c9f29ddecba7112f8fd3fed3c259db4b8] <==
W1014 13:39:45.325072 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E1014 13:39:45.325155 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.325293 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E1014 13:39:45.325348 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.325453 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E1014 13:39:45.325501 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.325587 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E1014 13:39:45.325633 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.326335 1 reflector.go:561] runtime/asm_arm64.s:1222: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E1014 13:39:45.326407 1 reflector.go:158] "Unhandled Error" err="runtime/asm_arm64.s:1222: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W1014 13:39:45.326552 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E1014 13:39:45.326605 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.326778 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E1014 13:39:45.326824 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.328929 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E1014 13:39:45.328970 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.329020 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E1014 13:39:45.329066 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.329081 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E1014 13:39:45.329201 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.329139 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E1014 13:39:45.329321 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W1014 13:39:45.329036 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E1014 13:39:45.329418 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
I1014 13:39:46.516822 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Oct 14 13:44:37 addons-002422 kubelet[1493]: E1014 13:44:37.386027 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913477385787450,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:44:43 addons-002422 kubelet[1493]: I1014 13:44:43.227033 1493 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/cloud-spanner-emulator-5b584cc74-fwt5t" secret="" err="secret \"gcp-auth\" not found"
Oct 14 13:44:47 addons-002422 kubelet[1493]: I1014 13:44:47.227064 1493 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="kube-system/nvidia-device-plugin-daemonset-tnngr" secret="" err="secret \"gcp-auth\" not found"
Oct 14 13:44:47 addons-002422 kubelet[1493]: E1014 13:44:47.306468 1493 container_manager_linux.go:513] "Failed to find cgroups of kubelet" err="cpu and memory cgroup hierarchy not unified. cpu: /docker/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c, memory: /docker/05e13f44fa23211d41ae7b94d00466d20b84537aca8298c4d05c6211297bec8c/system.slice/kubelet.service"
Oct 14 13:44:47 addons-002422 kubelet[1493]: E1014 13:44:47.388248 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913487388003739,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:44:47 addons-002422 kubelet[1493]: E1014 13:44:47.388443 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913487388003739,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:44:57 addons-002422 kubelet[1493]: E1014 13:44:57.390894 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913497390702142,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:44:57 addons-002422 kubelet[1493]: E1014 13:44:57.390930 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913497390702142,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:07 addons-002422 kubelet[1493]: E1014 13:45:07.393615 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913507393401233,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:07 addons-002422 kubelet[1493]: E1014 13:45:07.393657 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913507393401233,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:17 addons-002422 kubelet[1493]: E1014 13:45:17.396082 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913517395868129,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:17 addons-002422 kubelet[1493]: E1014 13:45:17.396120 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913517395868129,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:27 addons-002422 kubelet[1493]: E1014 13:45:27.399000 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913527398766059,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:27 addons-002422 kubelet[1493]: E1014 13:45:27.399037 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913527398766059,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:37 addons-002422 kubelet[1493]: E1014 13:45:37.402094 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913537401849429,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:37 addons-002422 kubelet[1493]: E1014 13:45:37.402136 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913537401849429,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:41 addons-002422 kubelet[1493]: I1014 13:45:41.227296 1493 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/busybox" secret="" err="secret \"gcp-auth\" not found"
Oct 14 13:45:46 addons-002422 kubelet[1493]: I1014 13:45:46.226928 1493 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="default/cloud-spanner-emulator-5b584cc74-fwt5t" secret="" err="secret \"gcp-auth\" not found"
Oct 14 13:45:47 addons-002422 kubelet[1493]: E1014 13:45:47.405884 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913547404719659,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:47 addons-002422 kubelet[1493]: E1014 13:45:47.405936 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913547404719659,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:56 addons-002422 kubelet[1493]: I1014 13:45:56.891800 1493 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/nginx" podStartSLOduration=139.122120863 podStartE2EDuration="2m19.891784176s" podCreationTimestamp="2024-10-14 13:43:37 +0000 UTC" firstStartedPulling="2024-10-14 13:43:37.622463466 +0000 UTC m=+230.495741694" lastFinishedPulling="2024-10-14 13:43:38.39212678 +0000 UTC m=+231.265405007" observedRunningTime="2024-10-14 13:43:38.904330519 +0000 UTC m=+231.777608755" watchObservedRunningTime="2024-10-14 13:45:56.891784176 +0000 UTC m=+369.765062403"
Oct 14 13:45:56 addons-002422 kubelet[1493]: I1014 13:45:56.967394 1493 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkvjj\" (UniqueName: \"kubernetes.io/projected/f5f83fdb-25be-40d5-9d3f-0e790983e8df-kube-api-access-nkvjj\") pod \"hello-world-app-55bf9c44b4-pfhmd\" (UID: \"f5f83fdb-25be-40d5-9d3f-0e790983e8df\") " pod="default/hello-world-app-55bf9c44b4-pfhmd"
Oct 14 13:45:57 addons-002422 kubelet[1493]: E1014 13:45:57.407799 1493 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913557407559699,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:57 addons-002422 kubelet[1493]: E1014 13:45:57.407840 1493 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1728913557407559699,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:569145,},InodesUsed:&UInt64Value{Value:219,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Oct 14 13:45:59 addons-002422 kubelet[1493]: I1014 13:45:59.191201 1493 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/hello-world-app-55bf9c44b4-pfhmd" podStartSLOduration=2.044239665 podStartE2EDuration="3.191183649s" podCreationTimestamp="2024-10-14 13:45:56 +0000 UTC" firstStartedPulling="2024-10-14 13:45:57.25463384 +0000 UTC m=+370.127912068" lastFinishedPulling="2024-10-14 13:45:58.401577824 +0000 UTC m=+371.274856052" observedRunningTime="2024-10-14 13:45:59.190453344 +0000 UTC m=+372.063731572" watchObservedRunningTime="2024-10-14 13:45:59.191183649 +0000 UTC m=+372.064461876"
==> storage-provisioner [749f7ebdaeaf50739e47418bda3ae0c2d5a85bd04259b5f9d851861c9e661f83] <==
I1014 13:40:09.372284 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I1014 13:40:09.406109 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I1014 13:40:09.406166 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I1014 13:40:09.433641 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I1014 13:40:09.434046 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-002422_b643fb17-4d87-4a06-8a88-cc3ffff5f150!
I1014 13:40:09.435321 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"8963a5d4-969c-4353-a393-1ec58810a372", APIVersion:"v1", ResourceVersion:"902", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-002422_b643fb17-4d87-4a06-8a88-cc3ffff5f150 became leader
I1014 13:40:09.535214 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-002422_b643fb17-4d87-4a06-8a88-cc3ffff5f150!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-002422 -n addons-002422
helpers_test.go:261: (dbg) Run: kubectl --context addons-002422 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: ingress-nginx-admission-create-wp9ww ingress-nginx-admission-patch-m2gmb
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Ingress]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-002422 describe pod ingress-nginx-admission-create-wp9ww ingress-nginx-admission-patch-m2gmb
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context addons-002422 describe pod ingress-nginx-admission-create-wp9ww ingress-nginx-admission-patch-m2gmb: exit status 1 (145.659609ms)
** stderr **
Error from server (NotFound): pods "ingress-nginx-admission-create-wp9ww" not found
Error from server (NotFound): pods "ingress-nginx-admission-patch-m2gmb" not found
** /stderr **
helpers_test.go:279: kubectl --context addons-002422 describe pod ingress-nginx-admission-create-wp9ww ingress-nginx-admission-patch-m2gmb: exit status 1
addons_test.go:988: (dbg) Run: out/minikube-linux-arm64 -p addons-002422 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:988: (dbg) Done: out/minikube-linux-arm64 -p addons-002422 addons disable ingress-dns --alsologtostderr -v=1: (1.064963384s)
addons_test.go:988: (dbg) Run: out/minikube-linux-arm64 -p addons-002422 addons disable ingress --alsologtostderr -v=1
addons_test.go:988: (dbg) Done: out/minikube-linux-arm64 -p addons-002422 addons disable ingress --alsologtostderr -v=1: (7.761015492s)
--- FAIL: TestAddons/parallel/Ingress (152.54s)