=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:207: (dbg) Run: kubectl --context addons-913502 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:232: (dbg) Run: kubectl --context addons-913502 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:245: (dbg) Run: kubectl --context addons-913502 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [6a367c0e-00d0-4f0c-a462-bf6e428f5d03] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [6a367c0e-00d0-4f0c-a462-bf6e428f5d03] Running
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 12.004072724s
addons_test.go:262: (dbg) Run: out/minikube-linux-amd64 -p addons-913502 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:262: (dbg) Non-zero exit: out/minikube-linux-amd64 -p addons-913502 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m10.459358995s)
** stderr **
ssh: Process exited with status 28
** /stderr **
addons_test.go:278: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
addons_test.go:286: (dbg) Run: kubectl --context addons-913502 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:291: (dbg) Run: out/minikube-linux-amd64 -p addons-913502 ip
addons_test.go:297: (dbg) Run: nslookup hello-john.test 192.168.49.2
addons_test.go:306: (dbg) Run: out/minikube-linux-amd64 -p addons-913502 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:306: (dbg) Done: out/minikube-linux-amd64 -p addons-913502 addons disable ingress-dns --alsologtostderr -v=1: (1.389781743s)
addons_test.go:311: (dbg) Run: out/minikube-linux-amd64 -p addons-913502 addons disable ingress --alsologtostderr -v=1
addons_test.go:311: (dbg) Done: out/minikube-linux-amd64 -p addons-913502 addons disable ingress --alsologtostderr -v=1: (7.636097846s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-913502
helpers_test.go:235: (dbg) docker inspect addons-913502:
-- stdout --
[
{
"Id": "3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb",
"Created": "2024-02-13T23:01:55.571688939Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 75592,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-02-13T23:01:55.862518621Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:9941de2e064a4a6a7155bfc66cedd2854b8c725b77bb8d4eaf81bef39f951dd7",
"ResolvConfPath": "/var/lib/docker/containers/3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb/hostname",
"HostsPath": "/var/lib/docker/containers/3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb/hosts",
"LogPath": "/var/lib/docker/containers/3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb/3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb-json.log",
"Name": "/addons-913502",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-913502:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-913502",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/3e8f3d3d2a707c488f0937be59982e109a3023b56a47058f35f66fe824106805-init/diff:/var/lib/docker/overlay2/4fe14e78c622f13dfc4094e03ac245950865fc60884691f5477756f62ef198c3/diff",
"MergedDir": "/var/lib/docker/overlay2/3e8f3d3d2a707c488f0937be59982e109a3023b56a47058f35f66fe824106805/merged",
"UpperDir": "/var/lib/docker/overlay2/3e8f3d3d2a707c488f0937be59982e109a3023b56a47058f35f66fe824106805/diff",
"WorkDir": "/var/lib/docker/overlay2/3e8f3d3d2a707c488f0937be59982e109a3023b56a47058f35f66fe824106805/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-913502",
"Source": "/var/lib/docker/volumes/addons-913502/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-913502",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-913502",
"name.minikube.sigs.k8s.io": "addons-913502",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "977adb16ddd7936c943d24cf6bbed1c2fbcc9892f88187bad5ac1a30fc183f68",
"SandboxKey": "/var/run/docker/netns/977adb16ddd7",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-913502": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": [
"3a3c4bea7929",
"addons-913502"
],
"MacAddress": "02:42:c0:a8:31:02",
"NetworkID": "21cd6f2cdff20306f8d612e0007531ff7b32f4461a8e90920eb3d5695c5858c9",
"EndpointID": "58e39049db5d55c8df8a1b40c1fbde3584d2fa7404d203924a57554b2890a41a",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DriverOpts": null,
"DNSNames": [
"addons-913502",
"3a3c4bea7929"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-913502 -n addons-913502
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-913502 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p addons-913502 logs -n 25: (1.197947015s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | -p download-only-658548 | download-only-658548 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | 13 Feb 24 23:01 UTC |
| delete | -p download-only-940739 | download-only-940739 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | 13 Feb 24 23:01 UTC |
| start | --download-only -p | download-docker-574132 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | |
| | download-docker-574132 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p download-docker-574132 | download-docker-574132 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | 13 Feb 24 23:01 UTC |
| start | --download-only -p | binary-mirror-182974 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | |
| | binary-mirror-182974 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:45437 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p binary-mirror-182974 | binary-mirror-182974 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | 13 Feb 24 23:01 UTC |
| addons | enable dashboard -p | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | |
| | addons-913502 | | | | | |
| addons | disable dashboard -p | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | |
| | addons-913502 | | | | | |
| start | -p addons-913502 --wait=true | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:01 UTC | 13 Feb 24 23:04 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | enable headlamp | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | -p addons-913502 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-913502 addons disable | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ip | addons-913502 ip | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| addons | addons-913502 addons disable | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-913502 addons | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | addons-913502 | | | | | |
| addons | disable nvidia-device-plugin | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | -p addons-913502 | | | | | |
| ssh | addons-913502 ssh curl -s | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| addons | disable cloud-spanner -p | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | addons-913502 | | | | | |
| ssh | addons-913502 ssh cat | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | /opt/local-path-provisioner/pvc-d0c9bd29-9bf8-4b15-8147-542eee087336_default_test-pvc/file1 | | | | | |
| addons | addons-913502 addons disable | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:04 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-913502 addons | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:04 UTC | 13 Feb 24 23:05 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-913502 addons | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:05 UTC | 13 Feb 24 23:05 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-913502 ip | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:06 UTC | 13 Feb 24 23:06 UTC |
| addons | addons-913502 addons disable | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:06 UTC | 13 Feb 24 23:06 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-913502 addons disable | addons-913502 | jenkins | v1.32.0 | 13 Feb 24 23:06 UTC | 13 Feb 24 23:06 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/02/13 23:01:33
Running on machine: ubuntu-20-agent-12
Binary: Built with gc go1.21.6 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0213 23:01:33.802457 74928 out.go:291] Setting OutFile to fd 1 ...
I0213 23:01:33.802751 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0213 23:01:33.802764 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:01:33.802772 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0213 23:01:33.802964 74928 root.go:338] Updating PATH: /home/jenkins/minikube-integration/18169-66678/.minikube/bin
I0213 23:01:33.803588 74928 out.go:298] Setting JSON to false
I0213 23:01:33.804445 74928 start.go:128] hostinfo: {"hostname":"ubuntu-20-agent-12","uptime":6241,"bootTime":1707859053,"procs":173,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1051-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0213 23:01:33.804522 74928 start.go:138] virtualization: kvm guest
I0213 23:01:33.807145 74928 out.go:177] * [addons-913502] minikube v1.32.0 on Ubuntu 20.04 (kvm/amd64)
I0213 23:01:33.808659 74928 out.go:177] - MINIKUBE_LOCATION=18169
I0213 23:01:33.808736 74928 notify.go:220] Checking for updates...
I0213 23:01:33.810211 74928 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0213 23:01:33.811749 74928 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/18169-66678/kubeconfig
I0213 23:01:33.813408 74928 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/18169-66678/.minikube
I0213 23:01:33.814940 74928 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0213 23:01:33.816553 74928 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0213 23:01:33.818237 74928 driver.go:392] Setting default libvirt URI to qemu:///system
I0213 23:01:33.838950 74928 docker.go:122] docker version: linux-25.0.3:Docker Engine - Community
I0213 23:01:33.839081 74928 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0213 23:01:33.887533 74928 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:50 SystemTime:2024-02-13 23:01:33.879264713 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1051-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86
_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647984640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-12 Labels:[] ExperimentalBuild:false ServerVersion:25.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerEr
rors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.12.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.24.5] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0213 23:01:33.887640 74928 docker.go:295] overlay module found
I0213 23:01:33.889521 74928 out.go:177] * Using the docker driver based on user configuration
I0213 23:01:33.890753 74928 start.go:298] selected driver: docker
I0213 23:01:33.890764 74928 start.go:902] validating driver "docker" against <nil>
I0213 23:01:33.890773 74928 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0213 23:01:33.891502 74928 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0213 23:01:33.939403 74928 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:50 SystemTime:2024-02-13 23:01:33.931405034 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1051-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86
_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647984640 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-12 Labels:[] ExperimentalBuild:false ServerVersion:25.0.3 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerEr
rors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.12.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.24.5] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0213 23:01:33.939552 74928 start_flags.go:307] no existing cluster config was found, will generate one from the flags
I0213 23:01:33.939753 74928 start_flags.go:927] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0213 23:01:33.941528 74928 out.go:177] * Using Docker driver with root privileges
I0213 23:01:33.943037 74928 cni.go:84] Creating CNI manager for ""
I0213 23:01:33.943056 74928 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0213 23:01:33.943064 74928 start_flags.go:316] Found "CNI" CNI - setting NetworkPlugin=cni
I0213 23:01:33.943079 74928 start_flags.go:321] config:
{Name:addons-913502 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-913502 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRI
Socket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs:}
I0213 23:01:33.944525 74928 out.go:177] * Starting control plane node addons-913502 in cluster addons-913502
I0213 23:01:33.945838 74928 cache.go:121] Beginning downloading kic base image for docker with crio
I0213 23:01:33.947351 74928 out.go:177] * Pulling base image v0.0.42-1704759386-17866 ...
I0213 23:01:33.948796 74928 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime crio
I0213 23:01:33.948823 74928 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 in local docker daemon
I0213 23:01:33.948830 74928 preload.go:148] Found local preload: /home/jenkins/minikube-integration/18169-66678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-cri-o-overlay-amd64.tar.lz4
I0213 23:01:33.948842 74928 cache.go:56] Caching tarball of preloaded images
I0213 23:01:33.948922 74928 preload.go:174] Found /home/jenkins/minikube-integration/18169-66678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-cri-o-overlay-amd64.tar.lz4 in cache, skipping download
I0213 23:01:33.948932 74928 cache.go:59] Finished verifying existence of preloaded tar for v1.28.4 on crio
I0213 23:01:33.949212 74928 profile.go:148] Saving config to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/config.json ...
I0213 23:01:33.949233 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/config.json: {Name:mkda011f67b178e30142640d19faa773cc1a510c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:33.963354 74928 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 to local cache
I0213 23:01:33.963477 74928 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 in local cache directory
I0213 23:01:33.963499 74928 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 in local cache directory, skipping pull
I0213 23:01:33.963504 74928 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 exists in cache, skipping pull
I0213 23:01:33.963517 74928 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 as a tarball
I0213 23:01:33.963528 74928 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 from local cache
I0213 23:01:45.779067 74928 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 from cached tarball
I0213 23:01:45.779130 74928 cache.go:194] Successfully downloaded all kic artifacts
I0213 23:01:45.779176 74928 start.go:365] acquiring machines lock for addons-913502: {Name:mkef1676655d6663ccf6dbaf971e7bc2d4264742 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0213 23:01:45.779300 74928 start.go:369] acquired machines lock for "addons-913502" in 101.791µs
I0213 23:01:45.779340 74928 start.go:93] Provisioning new machine with config: &{Name:addons-913502 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-913502 Namespace:default APIServerName:minikubeCA A
PIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false Disabl
eMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs:} &{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:crio ControlPlane:true Worker:true}
I0213 23:01:45.779457 74928 start.go:125] createHost starting for "" (driver="docker")
I0213 23:01:45.842307 74928 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0213 23:01:45.842640 74928 start.go:159] libmachine.API.Create for "addons-913502" (driver="docker")
I0213 23:01:45.842698 74928 client.go:168] LocalClient.Create starting
I0213 23:01:45.842847 74928 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca.pem
I0213 23:01:45.961344 74928 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/cert.pem
I0213 23:01:46.263337 74928 cli_runner.go:164] Run: docker network inspect addons-913502 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0213 23:01:46.278717 74928 cli_runner.go:211] docker network inspect addons-913502 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0213 23:01:46.278800 74928 network_create.go:281] running [docker network inspect addons-913502] to gather additional debugging logs...
I0213 23:01:46.278825 74928 cli_runner.go:164] Run: docker network inspect addons-913502
W0213 23:01:46.293546 74928 cli_runner.go:211] docker network inspect addons-913502 returned with exit code 1
I0213 23:01:46.293580 74928 network_create.go:284] error running [docker network inspect addons-913502]: docker network inspect addons-913502: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-913502 not found
I0213 23:01:46.293598 74928 network_create.go:286] output of [docker network inspect addons-913502]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-913502 not found
** /stderr **
I0213 23:01:46.293706 74928 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0213 23:01:46.308813 74928 network.go:207] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00217fe90}
I0213 23:01:46.308858 74928 network_create.go:124] attempt to create docker network addons-913502 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0213 23:01:46.308942 74928 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-913502 addons-913502
I0213 23:01:46.416637 74928 network_create.go:108] docker network addons-913502 192.168.49.0/24 created
I0213 23:01:46.416681 74928 kic.go:121] calculated static IP "192.168.49.2" for the "addons-913502" container
I0213 23:01:46.416755 74928 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0213 23:01:46.431038 74928 cli_runner.go:164] Run: docker volume create addons-913502 --label name.minikube.sigs.k8s.io=addons-913502 --label created_by.minikube.sigs.k8s.io=true
I0213 23:01:46.509179 74928 oci.go:103] Successfully created a docker volume addons-913502
I0213 23:01:46.509285 74928 cli_runner.go:164] Run: docker run --rm --name addons-913502-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-913502 --entrypoint /usr/bin/test -v addons-913502:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 -d /var/lib
I0213 23:01:50.336422 74928 cli_runner.go:217] Completed: docker run --rm --name addons-913502-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-913502 --entrypoint /usr/bin/test -v addons-913502:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 -d /var/lib: (3.827083726s)
I0213 23:01:50.336492 74928 oci.go:107] Successfully prepared a docker volume addons-913502
I0213 23:01:50.336511 74928 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime crio
I0213 23:01:50.336537 74928 kic.go:194] Starting extracting preloaded images to volume ...
I0213 23:01:50.336602 74928 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/18169-66678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-913502:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 -I lz4 -xf /preloaded.tar -C /extractDir
I0213 23:01:55.507474 74928 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/18169-66678/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-913502:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 -I lz4 -xf /preloaded.tar -C /extractDir: (5.17082s)
I0213 23:01:55.507510 74928 kic.go:203] duration metric: took 5.170972 seconds to extract preloaded images to volume
W0213 23:01:55.507709 74928 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0213 23:01:55.507913 74928 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0213 23:01:55.557651 74928 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-913502 --name addons-913502 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-913502 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-913502 --network addons-913502 --ip 192.168.49.2 --volume addons-913502:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0
I0213 23:01:55.870673 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Running}}
I0213 23:01:55.887910 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:01:55.905354 74928 cli_runner.go:164] Run: docker exec addons-913502 stat /var/lib/dpkg/alternatives/iptables
I0213 23:01:55.944529 74928 oci.go:144] the created container "addons-913502" has a running status.
I0213 23:01:55.944562 74928 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa...
I0213 23:01:56.351673 74928 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0213 23:01:56.372514 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:01:56.388762 74928 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0213 23:01:56.388784 74928 kic_runner.go:114] Args: [docker exec --privileged addons-913502 chown docker:docker /home/docker/.ssh/authorized_keys]
I0213 23:01:56.442339 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:01:56.458982 74928 machine.go:88] provisioning docker machine ...
I0213 23:01:56.459083 74928 ubuntu.go:169] provisioning hostname "addons-913502"
I0213 23:01:56.459174 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:56.475502 74928 main.go:141] libmachine: Using SSH client type: native
I0213 23:01:56.475851 74928 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80a4a0] 0x80d180 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I0213 23:01:56.475871 74928 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-913502 && echo "addons-913502" | sudo tee /etc/hostname
I0213 23:01:56.623054 74928 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-913502
I0213 23:01:56.623165 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:56.640216 74928 main.go:141] libmachine: Using SSH client type: native
I0213 23:01:56.640605 74928 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80a4a0] 0x80d180 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I0213 23:01:56.640624 74928 main.go:141] libmachine: About to run SSH command:
			if ! grep -xq '.*\saddons-913502' /etc/hosts; then
				if grep -xq '127.0.1.1\s.*' /etc/hosts; then
					sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-913502/g' /etc/hosts;
				else
					echo '127.0.1.1 addons-913502' | sudo tee -a /etc/hosts;
				fi
			fi
I0213 23:01:56.772385 74928 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0213 23:01:56.772414 74928 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/18169-66678/.minikube CaCertPath:/home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/18169-66678/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/18169-66678/.minikube}
I0213 23:01:56.772442 74928 ubuntu.go:177] setting up certificates
I0213 23:01:56.772452 74928 provision.go:83] configureAuth start
I0213 23:01:56.772509 74928 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-913502
I0213 23:01:56.788622 74928 provision.go:138] copyHostCerts
I0213 23:01:56.788692 74928 exec_runner.go:151] cp: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/18169-66678/.minikube/ca.pem (1078 bytes)
I0213 23:01:56.788815 74928 exec_runner.go:151] cp: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/18169-66678/.minikube/cert.pem (1123 bytes)
I0213 23:01:56.788880 74928 exec_runner.go:151] cp: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/18169-66678/.minikube/key.pem (1679 bytes)
I0213 23:01:56.788946 74928 provision.go:112] generating server cert: /home/jenkins/minikube-integration/18169-66678/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca-key.pem org=jenkins.addons-913502 san=[192.168.49.2 127.0.0.1 localhost 127.0.0.1 minikube addons-913502]
I0213 23:01:56.904275 74928 provision.go:172] copyRemoteCerts
I0213 23:01:56.904360 74928 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0213 23:01:56.904402 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:56.920491 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:01:57.016710 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0213 23:01:57.038007 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0213 23:01:57.058913 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/machines/server.pem --> /etc/docker/server.pem (1216 bytes)
I0213 23:01:57.080012 74928 provision.go:86] duration metric: configureAuth took 307.543796ms
I0213 23:01:57.080041 74928 ubuntu.go:193] setting minikube options for container-runtime
I0213 23:01:57.080242 74928 config.go:182] Loaded profile config "addons-913502": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.28.4
I0213 23:01:57.080401 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:57.096275 74928 main.go:141] libmachine: Using SSH client type: native
I0213 23:01:57.096782 74928 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x80a4a0] 0x80d180 <nil> [] 0s} 127.0.0.1 32772 <nil> <nil>}
I0213 23:01:57.096808 74928 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %!s(MISSING) "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
I0213 23:01:57.314209 74928 main.go:141] libmachine: SSH cmd err, output: <nil>:
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
I0213 23:01:57.314236 74928 machine.go:91] provisioned docker machine in 855.228843ms
I0213 23:01:57.314248 74928 client.go:171] LocalClient.Create took 11.471539737s
I0213 23:01:57.314274 74928 start.go:167] duration metric: libmachine.API.Create for "addons-913502" took 11.471638568s
I0213 23:01:57.314286 74928 start.go:300] post-start starting for "addons-913502" (driver="docker")
I0213 23:01:57.314301 74928 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0213 23:01:57.314361 74928 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0213 23:01:57.314409 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:57.330000 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:01:57.424767 74928 ssh_runner.go:195] Run: cat /etc/os-release
I0213 23:01:57.427764 74928 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0213 23:01:57.427806 74928 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0213 23:01:57.427815 74928 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0213 23:01:57.427822 74928 info.go:137] Remote host: Ubuntu 22.04.3 LTS
I0213 23:01:57.427833 74928 filesync.go:126] Scanning /home/jenkins/minikube-integration/18169-66678/.minikube/addons for local assets ...
I0213 23:01:57.427905 74928 filesync.go:126] Scanning /home/jenkins/minikube-integration/18169-66678/.minikube/files for local assets ...
I0213 23:01:57.427932 74928 start.go:303] post-start completed in 113.639134ms
I0213 23:01:57.428175 74928 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-913502
I0213 23:01:57.444115 74928 profile.go:148] Saving config to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/config.json ...
I0213 23:01:57.444426 74928 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0213 23:01:57.444484 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:57.459446 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:01:57.548937 74928 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0213 23:01:57.552957 74928 start.go:128] duration metric: createHost completed in 11.77348393s
I0213 23:01:57.552985 74928 start.go:83] releasing machines lock for "addons-913502", held for 11.773671818s
I0213 23:01:57.553074 74928 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-913502
I0213 23:01:57.568654 74928 ssh_runner.go:195] Run: cat /version.json
I0213 23:01:57.568712 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:57.568743 74928 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0213 23:01:57.568805 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:01:57.584943 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:01:57.585296 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:01:57.765743 74928 ssh_runner.go:195] Run: systemctl --version
I0213 23:01:57.769918 74928 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
I0213 23:01:57.906448 74928 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0213 23:01:57.910852 74928 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0213 23:01:57.928667 74928 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
I0213 23:01:57.928758 74928 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0213 23:01:57.954316 74928 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0213 23:01:57.954348 74928 start.go:475] detecting cgroup driver to use...
I0213 23:01:57.954383 74928 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0213 23:01:57.954475 74928 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0213 23:01:57.968671 74928 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0213 23:01:57.978853 74928 docker.go:217] disabling cri-docker service (if available) ...
I0213 23:01:57.978923 74928 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0213 23:01:57.990968 74928 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0213 23:01:58.004646 74928 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0213 23:01:58.089369 74928 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0213 23:01:58.165327 74928 docker.go:233] disabling docker service ...
I0213 23:01:58.165394 74928 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0213 23:01:58.182424 74928 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0213 23:01:58.192691 74928 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0213 23:01:58.272866 74928 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0213 23:01:58.353308 74928 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0213 23:01:58.363415 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I0213 23:01:58.378037 74928 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.9" pause image...
I0213 23:01:58.378098 74928 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf"
I0213 23:01:58.386524 74928 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
I0213 23:01:58.386589 74928 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
I0213 23:01:58.395177 74928 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
I0213 23:01:58.403730 74928 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
I0213 23:01:58.412174 74928 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0213 23:01:58.419957 74928 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0213 23:01:58.427182 74928 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0213 23:01:58.434497 74928 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0213 23:01:58.504415 74928 ssh_runner.go:195] Run: sudo systemctl restart crio
I0213 23:01:58.594502 74928 start.go:522] Will wait 60s for socket path /var/run/crio/crio.sock
I0213 23:01:58.594583 74928 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
I0213 23:01:58.598030 74928 start.go:543] Will wait 60s for crictl version
I0213 23:01:58.598086 74928 ssh_runner.go:195] Run: which crictl
I0213 23:01:58.601067 74928 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0213 23:01:58.633715 74928 start.go:559] Version: 0.1.0
RuntimeName: cri-o
RuntimeVersion: 1.24.6
RuntimeApiVersion: v1
I0213 23:01:58.633820 74928 ssh_runner.go:195] Run: crio --version
I0213 23:01:58.667569 74928 ssh_runner.go:195] Run: crio --version
I0213 23:01:58.702347 74928 out.go:177] * Preparing Kubernetes v1.28.4 on CRI-O 1.24.6 ...
I0213 23:01:58.703965 74928 cli_runner.go:164] Run: docker network inspect addons-913502 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0213 23:01:58.719309 74928 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0213 23:01:58.722871 74928 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0213 23:01:58.732851 74928 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime crio
I0213 23:01:58.732915 74928 ssh_runner.go:195] Run: sudo crictl images --output json
I0213 23:01:58.786394 74928 crio.go:496] all images are preloaded for cri-o runtime.
I0213 23:01:58.786417 74928 crio.go:415] Images already preloaded, skipping extraction
I0213 23:01:58.786478 74928 ssh_runner.go:195] Run: sudo crictl images --output json
I0213 23:01:58.818329 74928 crio.go:496] all images are preloaded for cri-o runtime.
I0213 23:01:58.818354 74928 cache_images.go:84] Images are preloaded, skipping loading
I0213 23:01:58.818439 74928 ssh_runner.go:195] Run: crio config
I0213 23:01:58.858847 74928 cni.go:84] Creating CNI manager for ""
I0213 23:01:58.858866 74928 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0213 23:01:58.858885 74928 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16
I0213 23:01:58.858910 74928 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.28.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-913502 NodeName:addons-913502 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0213 23:01:58.859068 74928 kubeadm.go:181] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/crio/crio.sock
  name: "addons-913502"
  kubeletExtraArgs:
    node-ip: 192.168.49.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.4
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
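For reference: after kubeadm init completes, the ClusterConfiguration and kubelet settings above are persisted into the cluster (see the upload-config and "kubelet-config" ConfigMap steps later in this log). A minimal sketch of how one could inspect what was actually applied, assuming the kubeconfig context created for this profile is named addons-913502:
  kubectl --context addons-913502 -n kube-system get configmap kubeadm-config -o yaml
  kubectl --context addons-913502 -n kube-system get configmap kubelet-config -o yaml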
I0213 23:01:58.859142 74928 kubeadm.go:976] kubelet [Unit]
Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/crio/crio.sock --enforce-node-allocatable= --hostname-override=addons-913502 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.28.4 ClusterName:addons-913502 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:}
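A minimal sketch of how one could verify the rendered kubelet unit and drop-in on the node (the files are written to /lib/systemd/system/kubelet.service and /etc/systemd/system/kubelet.service.d/10-kubeadm.conf in the scp steps just below), assuming the profile is still running:
  out/minikube-linux-amd64 -p addons-913502 ssh "systemctl cat kubelet"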
I0213 23:01:58.859207 74928 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.4
I0213 23:01:58.868897 74928 binaries.go:44] Found k8s binaries, skipping transfer
I0213 23:01:58.868971 74928 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0213 23:01:58.876778 74928 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (423 bytes)
I0213 23:01:58.892232 74928 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0213 23:01:58.907990 74928 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2094 bytes)
I0213 23:01:58.923492 74928 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0213 23:01:58.926829 74928 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0213 23:01:58.936256 74928 certs.go:56] Setting up /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502 for IP: 192.168.49.2
I0213 23:01:58.936295 74928 certs.go:190] acquiring lock for shared ca certs: {Name:mkdb62e9ebaf532b9b3d230de7912db241faf3db Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:58.936450 74928 certs.go:204] generating minikubeCA CA: /home/jenkins/minikube-integration/18169-66678/.minikube/ca.key
I0213 23:01:59.093397 74928 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18169-66678/.minikube/ca.crt ...
I0213 23:01:59.093429 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/ca.crt: {Name:mkcf281713fd12da39950efae50854b08ec69f43 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.093632 74928 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18169-66678/.minikube/ca.key ...
I0213 23:01:59.093649 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/ca.key: {Name:mk225d429655218b9b579662fe6463af54f8cb85 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.093749 74928 certs.go:204] generating proxyClientCA CA: /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.key
I0213 23:01:59.395159 74928 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.crt ...
I0213 23:01:59.395189 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.crt: {Name:mk05d31287cd9bb468f5aae5f083e3b0be506f86 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.395384 74928 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.key ...
I0213 23:01:59.395403 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.key: {Name:mkfc546f26fcdc0c4a4d3a5ba65de81c69e801b0 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.395728 74928 certs.go:319] generating minikube-user signed cert: /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/client.key
I0213 23:01:59.395752 74928 crypto.go:68] Generating cert /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/client.crt with IP's: []
I0213 23:01:59.526797 74928 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/client.crt ...
I0213 23:01:59.526832 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/client.crt: {Name:mk4a811e002ff90cce18e260f32e0acf1acb4d2f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.527016 74928 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/client.key ...
I0213 23:01:59.527033 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/client.key: {Name:mkd8c37d87ca35621b5c85f65432c3c210e0308f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.527125 74928 certs.go:319] generating minikube signed cert: /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.key.dd3b5fb2
I0213 23:01:59.527147 74928 crypto.go:68] Generating cert /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.crt.dd3b5fb2 with IP's: [192.168.49.2 10.96.0.1 127.0.0.1 10.0.0.1]
I0213 23:01:59.621220 74928 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.crt.dd3b5fb2 ...
I0213 23:01:59.621254 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.crt.dd3b5fb2: {Name:mk94d711cbc838f540a49336000043c160510b9d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.621453 74928 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.key.dd3b5fb2 ...
I0213 23:01:59.621474 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.key.dd3b5fb2: {Name:mkb47d1757d7852a332b9309583376038449085f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.621579 74928 certs.go:337] copying /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.crt.dd3b5fb2 -> /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.crt
I0213 23:01:59.621683 74928 certs.go:341] copying /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.key.dd3b5fb2 -> /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.key
I0213 23:01:59.621755 74928 certs.go:319] generating aggregator signed cert: /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.key
I0213 23:01:59.621776 74928 crypto.go:68] Generating cert /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.crt with IP's: []
I0213 23:01:59.876903 74928 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.crt ...
I0213 23:01:59.876941 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.crt: {Name:mk87a7369728a86f2a86e241b41de8175819e02a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.877140 74928 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.key ...
I0213 23:01:59.877162 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.key: {Name:mka4e4f7ea3dd20887482b9f5ba6abf9b502b86f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:01:59.877393 74928 certs.go:437] found cert: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca-key.pem (1679 bytes)
I0213 23:01:59.877438 74928 certs.go:437] found cert: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/home/jenkins/minikube-integration/18169-66678/.minikube/certs/ca.pem (1078 bytes)
I0213 23:01:59.877477 74928 certs.go:437] found cert: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/home/jenkins/minikube-integration/18169-66678/.minikube/certs/cert.pem (1123 bytes)
I0213 23:01:59.877511 74928 certs.go:437] found cert: /home/jenkins/minikube-integration/18169-66678/.minikube/certs/home/jenkins/minikube-integration/18169-66678/.minikube/certs/key.pem (1679 bytes)
I0213 23:01:59.878116 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes)
I0213 23:01:59.899794 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0213 23:01:59.920403 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0213 23:01:59.941045 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/profiles/addons-913502/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0213 23:01:59.962196 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0213 23:01:59.982676 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0213 23:02:00.003213 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0213 23:02:00.023877 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0213 23:02:00.044456 74928 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18169-66678/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0213 23:02:00.065346 74928 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0213 23:02:00.081154 74928 ssh_runner.go:195] Run: openssl version
I0213 23:02:00.086740 74928 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0213 23:02:00.095160 74928 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0213 23:02:00.098476 74928 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Feb 13 23:01 /usr/share/ca-certificates/minikubeCA.pem
I0213 23:02:00.098528 74928 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0213 23:02:00.104713 74928 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0213 23:02:00.112730 74928 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd
I0213 23:02:00.115556 74928 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2
stdout:
stderr:
ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory
I0213 23:02:00.115633 74928 kubeadm.go:404] StartCluster: {Name:addons-913502 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1704759386-17866@sha256:8c3c33047f9bc285e1f5f2a5aa14744a2fe04c58478f02f77b06169dea8dd3f0 Memory:4000 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-913502 Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI: NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs:}
I0213 23:02:00.115705 74928 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
I0213 23:02:00.115740 74928 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0213 23:02:00.147868 74928 cri.go:89] found id: ""
I0213 23:02:00.147937 74928 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0213 23:02:00.155791 74928 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0213 23:02:00.163702 74928 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver
I0213 23:02:00.163759 74928 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0213 23:02:00.171567 74928 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0213 23:02:00.171605 74928 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0213 23:02:00.250786 74928 kubeadm.go:322] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1051-gcp\n", err: exit status 1
I0213 23:02:00.313250 74928 kubeadm.go:322] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0213 23:02:09.611168 74928 kubeadm.go:322] [init] Using Kubernetes version: v1.28.4
I0213 23:02:09.611248 74928 kubeadm.go:322] [preflight] Running pre-flight checks
I0213 23:02:09.611357 74928 kubeadm.go:322] [preflight] The system verification failed. Printing the output from the verification:
I0213 23:02:09.611435 74928 kubeadm.go:322] KERNEL_VERSION: 5.15.0-1051-gcp
I0213 23:02:09.611481 74928 kubeadm.go:322] OS: Linux
I0213 23:02:09.611517 74928 kubeadm.go:322] CGROUPS_CPU: enabled
I0213 23:02:09.611560 74928 kubeadm.go:322] CGROUPS_CPUACCT: enabled
I0213 23:02:09.611596 74928 kubeadm.go:322] CGROUPS_CPUSET: enabled
I0213 23:02:09.611637 74928 kubeadm.go:322] CGROUPS_DEVICES: enabled
I0213 23:02:09.611674 74928 kubeadm.go:322] CGROUPS_FREEZER: enabled
I0213 23:02:09.611716 74928 kubeadm.go:322] CGROUPS_MEMORY: enabled
I0213 23:02:09.611754 74928 kubeadm.go:322] CGROUPS_PIDS: enabled
I0213 23:02:09.611791 74928 kubeadm.go:322] CGROUPS_HUGETLB: enabled
I0213 23:02:09.611827 74928 kubeadm.go:322] CGROUPS_BLKIO: enabled
I0213 23:02:09.611905 74928 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster
I0213 23:02:09.612028 74928 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0213 23:02:09.612113 74928 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0213 23:02:09.612167 74928 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0213 23:02:09.613734 74928 out.go:204] - Generating certificates and keys ...
I0213 23:02:09.613815 74928 kubeadm.go:322] [certs] Using existing ca certificate authority
I0213 23:02:09.613884 74928 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk
I0213 23:02:09.613967 74928 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key
I0213 23:02:09.614015 74928 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key
I0213 23:02:09.614104 74928 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key
I0213 23:02:09.614191 74928 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key
I0213 23:02:09.614253 74928 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key
I0213 23:02:09.614380 74928 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [addons-913502 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0213 23:02:09.614439 74928 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key
I0213 23:02:09.614583 74928 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [addons-913502 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0213 23:02:09.614642 74928 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key
I0213 23:02:09.614694 74928 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key
I0213 23:02:09.614736 74928 kubeadm.go:322] [certs] Generating "sa" key and public key
I0213 23:02:09.614785 74928 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0213 23:02:09.614825 74928 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file
I0213 23:02:09.614867 74928 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0213 23:02:09.614960 74928 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0213 23:02:09.615035 74928 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0213 23:02:09.615121 74928 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0213 23:02:09.615193 74928 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0213 23:02:09.616897 74928 out.go:204] - Booting up control plane ...
I0213 23:02:09.616996 74928 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0213 23:02:09.617062 74928 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0213 23:02:09.617136 74928 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0213 23:02:09.617263 74928 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0213 23:02:09.617376 74928 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0213 23:02:09.617417 74928 kubeadm.go:322] [kubelet-start] Starting the kubelet
I0213 23:02:09.617535 74928 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I0213 23:02:09.617593 74928 kubeadm.go:322] [apiclient] All control plane components are healthy after 5.002702 seconds
I0213 23:02:09.617672 74928 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0213 23:02:09.617784 74928 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0213 23:02:09.617842 74928 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs
I0213 23:02:09.617985 74928 kubeadm.go:322] [mark-control-plane] Marking the node addons-913502 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0213 23:02:09.618057 74928 kubeadm.go:322] [bootstrap-token] Using token: unxy31.n4ikrym8ylskkum4
I0213 23:02:09.619538 74928 out.go:204] - Configuring RBAC rules ...
I0213 23:02:09.619664 74928 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0213 23:02:09.619778 74928 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0213 23:02:09.619970 74928 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0213 23:02:09.620128 74928 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0213 23:02:09.620289 74928 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0213 23:02:09.620439 74928 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0213 23:02:09.620590 74928 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0213 23:02:09.620663 74928 kubeadm.go:322] [addons] Applied essential addon: CoreDNS
I0213 23:02:09.620711 74928 kubeadm.go:322] [addons] Applied essential addon: kube-proxy
I0213 23:02:09.620717 74928 kubeadm.go:322]
I0213 23:02:09.620777 74928 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully!
I0213 23:02:09.620783 74928 kubeadm.go:322]
I0213 23:02:09.620889 74928 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user:
I0213 23:02:09.620901 74928 kubeadm.go:322]
I0213 23:02:09.620936 74928 kubeadm.go:322] mkdir -p $HOME/.kube
I0213 23:02:09.620981 74928 kubeadm.go:322] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0213 23:02:09.621023 74928 kubeadm.go:322] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0213 23:02:09.621028 74928 kubeadm.go:322]
I0213 23:02:09.621071 74928 kubeadm.go:322] Alternatively, if you are the root user, you can run:
I0213 23:02:09.621077 74928 kubeadm.go:322]
I0213 23:02:09.621111 74928 kubeadm.go:322] export KUBECONFIG=/etc/kubernetes/admin.conf
I0213 23:02:09.621117 74928 kubeadm.go:322]
I0213 23:02:09.621163 74928 kubeadm.go:322] You should now deploy a pod network to the cluster.
I0213 23:02:09.621226 74928 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0213 23:02:09.621279 74928 kubeadm.go:322] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0213 23:02:09.621284 74928 kubeadm.go:322]
I0213 23:02:09.621350 74928 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities
I0213 23:02:09.621408 74928 kubeadm.go:322] and service account keys on each node and then running the following as root:
I0213 23:02:09.621415 74928 kubeadm.go:322]
I0213 23:02:09.621476 74928 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token unxy31.n4ikrym8ylskkum4 \
I0213 23:02:09.621555 74928 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:65a739a3fc766348b9b774a07bf25aabb4395eca8f80a3b593899c4975cd65db \
I0213 23:02:09.621572 74928 kubeadm.go:322] --control-plane
I0213 23:02:09.621579 74928 kubeadm.go:322]
I0213 23:02:09.621682 74928 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root:
I0213 23:02:09.621698 74928 kubeadm.go:322]
I0213 23:02:09.621772 74928 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token unxy31.n4ikrym8ylskkum4 \
I0213 23:02:09.621944 74928 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:65a739a3fc766348b9b774a07bf25aabb4395eca8f80a3b593899c4975cd65db
I0213 23:02:09.621978 74928 cni.go:84] Creating CNI manager for ""
I0213 23:02:09.621990 74928 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0213 23:02:09.623781 74928 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0213 23:02:09.625221 74928 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0213 23:02:09.661187 74928 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.4/kubectl ...
I0213 23:02:09.661212 74928 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
I0213 23:02:09.677721 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
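Since kindnet was selected as the CNI above, a quick check that the applied manifest rolled out is sketched below; the DaemonSet name kindnet and label app=kindnet are assumptions based on minikube's usual kindnet manifest, not taken from this log:
  kubectl --context addons-913502 -n kube-system get daemonset kindnet
  kubectl --context addons-913502 -n kube-system get pods -l app=kindnet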
I0213 23:02:10.340192 74928 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0213 23:02:10.340272 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:10.340286 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl label nodes minikube.k8s.io/version=v1.32.0 minikube.k8s.io/commit=90664111bc55fed26ce3e984eae935c06b114802 minikube.k8s.io/name=addons-913502 minikube.k8s.io/updated_at=2024_02_13T23_02_10_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:10.417873 74928 ops.go:34] apiserver oom_adj: -16
I0213 23:02:10.418015 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:10.918402 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:11.418317 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:11.918115 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:12.419052 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:12.918741 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:13.418656 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:13.918840 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:14.418821 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:14.918986 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:15.418873 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:15.919084 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:16.418610 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:16.918097 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:17.418501 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:17.918815 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:18.418264 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:18.918398 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:19.418543 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:19.918327 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:20.418500 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:20.918842 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:21.418863 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:21.918604 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:22.418857 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:22.918861 74928 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0213 23:02:22.982651 74928 kubeadm.go:1088] duration metric: took 12.642436469s to wait for elevateKubeSystemPrivileges.
I0213 23:02:22.982687 74928 kubeadm.go:406] StartCluster complete in 22.867058371s
I0213 23:02:22.982711 74928 settings.go:142] acquiring lock: {Name:mk89817e7b00c42ae84864184d25a5290738d17b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:02:22.982831 74928 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/18169-66678/kubeconfig
I0213 23:02:22.983213 74928 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18169-66678/kubeconfig: {Name:mk1392731503c3f5245f6110a90036e5311cfc32 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0213 23:02:22.983472 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0213 23:02:22.983491 74928 addons.go:502] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volumesnapshots:true yakd:true]
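The same toEnable set can also be checked from the CLI; a small sketch (exact output formatting varies by minikube version):
  out/minikube-linux-amd64 -p addons-913502 addons list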
I0213 23:02:22.983587 74928 addons.go:69] Setting yakd=true in profile "addons-913502"
I0213 23:02:22.983613 74928 addons.go:234] Setting addon yakd=true in "addons-913502"
I0213 23:02:22.983656 74928 addons.go:69] Setting ingress-dns=true in profile "addons-913502"
I0213 23:02:22.983676 74928 addons.go:234] Setting addon ingress-dns=true in "addons-913502"
I0213 23:02:22.983676 74928 addons.go:69] Setting registry=true in profile "addons-913502"
I0213 23:02:22.983697 74928 config.go:182] Loaded profile config "addons-913502": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.28.4
I0213 23:02:22.983708 74928 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-913502"
I0213 23:02:22.983715 74928 addons.go:69] Setting storage-provisioner=true in profile "addons-913502"
I0213 23:02:22.983727 74928 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-913502"
I0213 23:02:22.983731 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983744 74928 addons.go:69] Setting inspektor-gadget=true in profile "addons-913502"
I0213 23:02:22.983750 74928 addons.go:69] Setting volumesnapshots=true in profile "addons-913502"
I0213 23:02:22.983760 74928 addons.go:234] Setting addon inspektor-gadget=true in "addons-913502"
I0213 23:02:22.983770 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983771 74928 addons.go:234] Setting addon volumesnapshots=true in "addons-913502"
I0213 23:02:22.983742 74928 addons.go:69] Setting metrics-server=true in profile "addons-913502"
I0213 23:02:22.983781 74928 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-913502"
I0213 23:02:22.983793 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983804 74928 addons.go:234] Setting addon metrics-server=true in "addons-913502"
I0213 23:02:22.983810 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983858 74928 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-913502"
I0213 23:02:22.983860 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983699 74928 addons.go:234] Setting addon registry=true in "addons-913502"
I0213 23:02:22.983910 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983938 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.984184 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.984258 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.984269 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.984283 74928 addons.go:69] Setting helm-tiller=true in profile "addons-913502"
I0213 23:02:22.984296 74928 addons.go:234] Setting addon helm-tiller=true in "addons-913502"
I0213 23:02:22.984353 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.983727 74928 addons.go:234] Setting addon storage-provisioner=true in "addons-913502"
I0213 23:02:22.984388 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.984402 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.984530 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.984846 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.985007 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.983683 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.985740 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.985796 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.983743 74928 addons.go:69] Setting default-storageclass=true in profile "addons-913502"
I0213 23:02:22.990881 74928 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-913502"
I0213 23:02:22.983771 74928 addons.go:69] Setting cloud-spanner=true in profile "addons-913502"
I0213 23:02:22.991095 74928 addons.go:234] Setting addon cloud-spanner=true in "addons-913502"
I0213 23:02:22.991160 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.991725 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.991911 74928 addons.go:69] Setting ingress=true in profile "addons-913502"
I0213 23:02:22.991932 74928 addons.go:234] Setting addon ingress=true in "addons-913502"
I0213 23:02:22.991993 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:22.984269 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.993062 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:22.983735 74928 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-913502"
I0213 23:02:23.002086 74928 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-913502"
I0213 23:02:23.002506 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:23.002837 74928 addons.go:69] Setting gcp-auth=true in profile "addons-913502"
I0213 23:02:23.002878 74928 mustload.go:65] Loading cluster: addons-913502
I0213 23:02:23.003106 74928 config.go:182] Loaded profile config "addons-913502": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.28.4
I0213 23:02:23.003417 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:23.004309 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:23.031862 74928 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0213 23:02:23.033835 74928 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0213 23:02:23.033908 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0213 23:02:23.034001 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.032532 74928 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.0
I0213 23:02:23.036190 74928 out.go:177] - Using image docker.io/registry:2.8.3
I0213 23:02:23.037837 74928 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.5
I0213 23:02:23.036167 74928 addons.go:426] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0213 23:02:23.039354 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0213 23:02:23.039417 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.039569 74928 addons.go:426] installing /etc/kubernetes/addons/registry-rc.yaml
I0213 23:02:23.039586 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
I0213 23:02:23.039667 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.051407 74928 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
I0213 23:02:23.047005 74928 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-913502"
I0213 23:02:23.053844 74928 addons.go:426] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0213 23:02:23.054002 74928 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0213 23:02:23.054046 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:23.060044 74928 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0213 23:02:23.060130 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0213 23:02:23.060136 74928 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.14.3
I0213 23:02:23.065283 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:23.065293 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:23.065952 74928 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.13
I0213 23:02:23.066282 74928 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
I0213 23:02:23.068515 74928 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.25.1
I0213 23:02:23.068601 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.070874 74928 addons.go:426] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0213 23:02:23.078635 74928 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0213 23:02:23.073093 74928 addons.go:426] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0213 23:02:23.073162 74928 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.4
I0213 23:02:23.073234 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.073375 74928 addons.go:426] installing /etc/kubernetes/addons/deployment.yaml
I0213 23:02:23.073451 74928 addons.go:426] installing /etc/kubernetes/addons/ig-namespace.yaml
I0213 23:02:23.073943 74928 addons.go:426] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0213 23:02:23.073959 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0213 23:02:23.081254 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.081475 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0213 23:02:23.081536 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.081666 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0213 23:02:23.081714 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.084398 74928 addons.go:426] installing /etc/kubernetes/addons/yakd-ns.yaml
I0213 23:02:23.084419 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0213 23:02:23.084478 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.083047 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0213 23:02:23.086824 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.083132 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0213 23:02:23.087039 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.089123 74928 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0213 23:02:23.090761 74928 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
I0213 23:02:23.097820 74928 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0213 23:02:23.097802 74928 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.9.5
I0213 23:02:23.099279 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.102907 74928 out.go:177] - Using image docker.io/busybox:stable
I0213 23:02:23.103559 74928 addons.go:234] Setting addon default-storageclass=true in "addons-913502"
I0213 23:02:23.101221 74928 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0213 23:02:23.104497 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.104916 74928 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0213 23:02:23.104967 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:23.109605 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:23.112565 74928 addons.go:426] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0213 23:02:23.112584 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0213 23:02:23.112637 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.112669 74928 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0213 23:02:23.112749 74928 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
I0213 23:02:23.120438 74928 addons.go:426] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0213 23:02:23.120464 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16103 bytes)
I0213 23:02:23.120547 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.114776 74928 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0213 23:02:23.122602 74928 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0213 23:02:23.128489 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0213 23:02:23.128511 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0213 23:02:23.128570 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.123911 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.127342 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.132510 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.133391 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.136234 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.145281 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.145869 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.150678 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.151507 74928 addons.go:426] installing /etc/kubernetes/addons/storageclass.yaml
I0213 23:02:23.151522 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0213 23:02:23.151561 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:23.152797 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.160097 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
W0213 23:02:23.169014 74928 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0213 23:02:23.169044 74928 retry.go:31] will retry after 213.16075ms: ssh: handshake failed: EOF
I0213 23:02:23.189261 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:23.192668 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
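The sed pipeline above injects a hosts block into the CoreDNS Corefile so that host.minikube.internal resolves to 192.168.49.1 from inside the cluster. A minimal sketch of how one could confirm the rewrite, assuming the standard coredns ConfigMap layout in kube-system:
  kubectl --context addons-913502 -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'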
I0213 23:02:23.364619 74928 addons.go:426] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0213 23:02:23.364720 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0213 23:02:23.387238 74928 addons.go:426] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0213 23:02:23.387332 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0213 23:02:23.387243 74928 addons.go:426] installing /etc/kubernetes/addons/registry-svc.yaml
I0213 23:02:23.387432 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0213 23:02:23.390629 74928 addons.go:426] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0213 23:02:23.390670 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0213 23:02:23.484241 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0213 23:02:23.563949 74928 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-913502" context rescaled to 1 replicas
I0213 23:02:23.564077 74928 start.go:223] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:crio ControlPlane:true Worker:true}
I0213 23:02:23.566218 74928 out.go:177] * Verifying Kubernetes components...
I0213 23:02:23.567950 74928 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0213 23:02:23.568779 74928 addons.go:426] installing /etc/kubernetes/addons/registry-proxy.yaml
I0213 23:02:23.568829 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0213 23:02:23.575121 74928 addons.go:426] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0213 23:02:23.575157 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0213 23:02:23.578358 74928 addons.go:426] installing /etc/kubernetes/addons/yakd-sa.yaml
I0213 23:02:23.578381 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0213 23:02:23.580745 74928 addons.go:426] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0213 23:02:23.580767 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0213 23:02:23.583468 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0213 23:02:23.585933 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0213 23:02:23.661184 74928 addons.go:426] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0213 23:02:23.661288 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0213 23:02:23.662332 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0213 23:02:23.762737 74928 addons.go:426] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0213 23:02:23.762828 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0213 23:02:23.763106 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0213 23:02:23.764021 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0213 23:02:23.779502 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0213 23:02:23.779758 74928 addons.go:426] installing /etc/kubernetes/addons/yakd-crb.yaml
I0213 23:02:23.779805 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0213 23:02:23.781142 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0213 23:02:23.781211 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0213 23:02:23.782278 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0213 23:02:23.965016 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0213 23:02:23.966343 74928 addons.go:426] installing /etc/kubernetes/addons/ig-role.yaml
I0213 23:02:23.966412 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0213 23:02:23.968040 74928 addons.go:426] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0213 23:02:23.968103 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0213 23:02:23.969633 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0213 23:02:24.178833 74928 addons.go:426] installing /etc/kubernetes/addons/yakd-svc.yaml
I0213 23:02:24.178927 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0213 23:02:24.263228 74928 addons.go:426] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0213 23:02:24.263323 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0213 23:02:24.282218 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0213 23:02:24.282299 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0213 23:02:24.675596 74928 addons.go:426] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0213 23:02:24.675695 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0213 23:02:24.873617 74928 addons.go:426] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0213 23:02:24.873708 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0213 23:02:24.883440 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0213 23:02:24.883470 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0213 23:02:24.962072 74928 addons.go:426] installing /etc/kubernetes/addons/yakd-dp.yaml
I0213 23:02:24.962202 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0213 23:02:25.079877 74928 addons.go:426] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0213 23:02:25.079973 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0213 23:02:25.178929 74928 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.986222793s)
I0213 23:02:25.178970 74928 start.go:929] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
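The two-second command that just completed is the CoreDNS host-record injection: the sed pipeline shown at its start pulls the coredns ConfigMap, inserts a log directive immediately before the existing errors plugin line and a hosts block immediately before the existing "forward . /etc/resolv.conf" line, then pipes the result back through kubectl replace. Unescaped, the inserted hosts block from that sed expression reads:

  hosts {
     192.168.49.1 host.minikube.internal
     fallthrough
  }

The rewritten ConfigMap can be inspected afterwards (this verification step is not part of the log) with:

  kubectl --context addons-913502 -n kube-system get configmap coredns -o yaml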
I0213 23:02:25.281008 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0213 23:02:25.281100 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0213 23:02:25.462179 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0213 23:02:25.580592 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0213 23:02:25.580689 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0213 23:02:25.762351 74928 addons.go:426] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0213 23:02:25.762459 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0213 23:02:25.867338 74928 addons.go:426] installing /etc/kubernetes/addons/ig-crd.yaml
I0213 23:02:25.867431 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0213 23:02:26.074969 74928 addons.go:426] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0213 23:02:26.075073 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0213 23:02:26.181807 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0213 23:02:26.368405 74928 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0213 23:02:26.368489 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0213 23:02:26.371468 74928 addons.go:426] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0213 23:02:26.371539 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0213 23:02:26.682999 74928 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0213 23:02:26.683075 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0213 23:02:26.763436 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0213 23:02:27.164865 74928 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0213 23:02:27.164963 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0213 23:02:27.379169 74928 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0213 23:02:27.379254 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0213 23:02:27.582716 74928 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0213 23:02:27.582754 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0213 23:02:27.768007 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0213 23:02:29.878176 74928 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0213 23:02:29.878299 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:29.902147 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:29.980848 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (6.496480267s)
I0213 23:02:29.980901 74928 addons.go:470] Verifying addon ingress=true in "addons-913502"
I0213 23:02:29.982345 74928 out.go:177] * Verifying ingress addon...
I0213 23:02:29.981085 74928 ssh_runner.go:235] Completed: sudo systemctl is-active --quiet service kubelet: (6.413066893s)
I0213 23:02:29.981193 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (6.39769359s)
I0213 23:02:29.981243 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (6.39523503s)
I0213 23:02:29.981291 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (6.318879963s)
I0213 23:02:29.981340 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (6.218176586s)
I0213 23:02:29.981383 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (6.217336657s)
I0213 23:02:29.981422 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (6.201784987s)
I0213 23:02:29.981506 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (6.198981444s)
I0213 23:02:29.981547 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (6.016503521s)
I0213 23:02:29.981578 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (6.011857893s)
I0213 23:02:29.981618 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (4.519338766s)
I0213 23:02:29.981715 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (3.799822945s)
I0213 23:02:29.981772 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (3.218245521s)
I0213 23:02:29.984635 74928 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0213 23:02:29.984872 74928 addons.go:470] Verifying addon metrics-server=true in "addons-913502"
I0213 23:02:29.984898 74928 addons.go:470] Verifying addon registry=true in "addons-913502"
I0213 23:02:29.988104 74928 out.go:177] * Verifying registry addon...
I0213 23:02:29.989644 74928 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-913502 service yakd-dashboard -n yakd-dashboard
W0213 23:02:29.985071 74928 addons.go:452] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0213 23:02:29.985575 74928 node_ready.go:35] waiting up to 6m0s for node "addons-913502" to be "Ready" ...
I0213 23:02:29.991716 74928 retry.go:31] will retry after 138.392466ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
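Both failures above are the same CRD-ordering race: csi-hostpath-snapshotclass.yaml (a VolumeSnapshotClass object) is applied in the same batch as the CRDs that define it, so the API server has no mapping for kind VolumeSnapshotClass in snapshot.storage.k8s.io/v1 yet, hence "ensure CRDs are installed first". minikube handles this by retrying, and the re-run at 23:02:30.133 below adds --force, which succeeds once the CRDs are established. A manual equivalent, sketched with only the CRD and file names that appear in this log, would be to wait for the CRD to become established and then re-apply just the snapshot class:

  sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl \
    wait --for condition=established crd/volumesnapshotclasses.snapshot.storage.k8s.io --timeout=60s
  sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl \
    apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml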
I0213 23:02:29.994778 74928 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0213 23:02:30.063610 74928 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0213 23:02:30.063638 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:30.066068 74928 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=registry
I0213 23:02:30.066133 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
W0213 23:02:30.067017 74928 out.go:239] ! Enabling 'storage-provisioner-rancher' returned an error: running callbacks: [Error making local-path the default storage class: Error while marking storage class local-path as default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
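The storage-provisioner-rancher warning above is an optimistic-concurrency conflict: the storageclasses.storage.k8s.io "local-path" object was modified between the addon's read and its update, so the write with the stale resourceVersion is rejected, and retrying against the latest version of the object is the usual remedy. A minimal manual equivalent of the step that failed, assuming the standard Kubernetes default-class annotation (the annotation itself is not shown in this log), would be:

  kubectl --context addons-913502 patch storageclass local-path \
    -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'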
I0213 23:02:30.133078 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0213 23:02:30.144860 74928 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0213 23:02:30.176952 74928 addons.go:234] Setting addon gcp-auth=true in "addons-913502"
I0213 23:02:30.177029 74928 host.go:66] Checking if "addons-913502" exists ...
I0213 23:02:30.177460 74928 cli_runner.go:164] Run: docker container inspect addons-913502 --format={{.State.Status}}
I0213 23:02:30.201007 74928 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0213 23:02:30.201079 74928 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-913502
I0213 23:02:30.220078 74928 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32772 SSHKeyPath:/home/jenkins/minikube-integration/18169-66678/.minikube/machines/addons-913502/id_rsa Username:docker}
I0213 23:02:30.488245 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:30.499069 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:30.982712 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (3.214645965s)
I0213 23:02:30.982756 74928 addons.go:470] Verifying addon csi-hostpath-driver=true in "addons-913502"
I0213 23:02:30.985542 74928 out.go:177] * Verifying csi-hostpath-driver addon...
I0213 23:02:30.988078 74928 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0213 23:02:30.988511 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:30.992836 74928 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0213 23:02:30.992860 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:30.997896 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:31.263697 74928 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (1.130533125s)
I0213 23:02:31.263744 74928 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (1.062704974s)
I0213 23:02:31.265775 74928 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0
I0213 23:02:31.267270 74928 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.0
I0213 23:02:31.268627 74928 addons.go:426] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0213 23:02:31.268650 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0213 23:02:31.285679 74928 addons.go:426] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0213 23:02:31.285706 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0213 23:02:31.301953 74928 addons.go:426] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0213 23:02:31.301976 74928 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5432 bytes)
I0213 23:02:31.318463 74928 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0213 23:02:31.489837 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:31.492840 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:31.500586 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:31.768196 74928 addons.go:470] Verifying addon gcp-auth=true in "addons-913502"
I0213 23:02:31.770017 74928 out.go:177] * Verifying gcp-auth addon...
I0213 23:02:31.772404 74928 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0213 23:02:31.775066 74928 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0213 23:02:31.775086 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:31.988757 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:31.993045 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:32.010923 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:32.012048 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:32.276134 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:32.489242 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:32.491967 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:32.498625 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:32.776548 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:32.988948 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:32.991643 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:32.998362 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:33.276706 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:33.568220 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:33.568639 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:33.569337 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:33.775962 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:33.989717 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:33.992957 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:33.998584 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:34.276538 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:34.564927 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:34.566203 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:34.567003 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:34.568645 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:34.776751 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:34.990868 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:34.994129 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:34.999067 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:35.277284 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:35.489832 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:35.492724 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:35.498427 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:35.776948 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:35.989013 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:35.992193 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:35.999151 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:36.277123 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:36.488777 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:36.492466 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:36.498976 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:36.776433 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:36.989118 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:36.991704 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:36.994633 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:36.999086 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:37.276402 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:37.489274 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:37.491868 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:37.498371 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:37.776537 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:37.989294 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:37.991948 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:37.997920 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:38.275919 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:38.488500 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:38.491750 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:38.497858 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:38.776298 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:38.989352 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:38.991620 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:38.994795 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:38.997867 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:39.276196 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:39.488986 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:39.492912 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:39.497960 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:39.777073 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:39.988750 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:39.992266 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:39.997861 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:40.278608 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:40.488739 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:40.492429 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:40.498056 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:40.776349 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:40.989541 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:40.992147 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:40.994911 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:40.998314 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:41.276465 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:41.489115 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:41.491745 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:41.498063 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:41.776223 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:41.989203 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:41.992603 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:41.997783 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:42.275894 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:42.488422 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:42.491821 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:42.497739 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:42.776101 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:42.988855 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:42.992378 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:43.000152 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:43.276474 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:43.489015 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:43.491441 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:43.494388 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:43.498084 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:43.776658 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:43.988288 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:43.991547 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:43.998261 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:44.276372 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:44.488896 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:44.491644 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:44.498464 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:44.776532 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:44.989088 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:44.991493 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:44.998005 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:45.276191 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:45.488607 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:45.491876 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:45.497817 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:45.776027 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:45.988622 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:45.992102 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:45.994667 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:45.998396 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:46.276528 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:46.488596 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:46.491740 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:46.498749 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:46.776011 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:46.988972 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:46.992278 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:46.998016 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:47.276390 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:47.489274 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:47.491599 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:47.498221 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:47.776109 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:47.988770 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:47.991917 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:47.998108 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:48.276233 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:48.489071 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:48.491556 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:48.494551 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:48.498067 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:48.776156 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:48.989232 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:48.991795 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:48.998784 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:49.276162 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:49.488644 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:49.492302 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:49.498751 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:49.775921 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:49.988269 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:49.991470 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:49.997979 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:50.276130 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:50.488253 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:50.491560 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:50.498064 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:50.776234 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:50.988621 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:50.991756 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:50.993946 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:50.998748 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:51.275941 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:51.488758 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:51.492380 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:51.498252 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:51.776635 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:51.988856 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:51.992028 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:51.998634 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:52.275774 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:52.489878 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:52.492417 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:52.497802 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:52.775936 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:52.988344 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:52.991580 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:52.994617 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:52.998237 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:53.276532 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:53.489007 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:53.491485 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:53.497881 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:53.776240 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:53.989145 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:53.991414 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:53.998472 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:54.276494 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:54.489628 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:54.491806 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:54.497928 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:54.776011 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:54.988525 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:54.991612 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:54.998115 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:55.276636 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:55.489205 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:55.491374 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:55.494330 74928 node_ready.go:58] node "addons-913502" has status "Ready":"False"
I0213 23:02:55.497840 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:55.775886 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:55.988988 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:55.992887 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:55.998088 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:56.276285 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:56.489013 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:56.492849 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:56.498657 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:56.775866 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:56.988357 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:56.992015 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:56.998177 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:57.280070 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:57.489235 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:57.493840 74928 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0213 23:02:57.493868 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:57.495082 74928 node_ready.go:49] node "addons-913502" has status "Ready":"True"
I0213 23:02:57.495106 74928 node_ready.go:38] duration metric: took 27.50339897s waiting for node "addons-913502" to be "Ready" ...
I0213 23:02:57.495119 74928 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
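From this point the wait happens on two layers: the node object itself (completed above after roughly 27.5s) and the system-critical pods selected by the labels listed in the previous line. A rough manual equivalent for one of those selectors, reusing the context name from this run, would be:

  kubectl --context addons-913502 -n kube-system wait \
    --for=condition=Ready pod -l k8s-app=kube-dns --timeout=6m

The remaining selectors (component=etcd, component=kube-apiserver, component=kube-controller-manager, k8s-app=kube-proxy, component=kube-scheduler) are checked the same way in the pod_ready.go lines that follow.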
I0213 23:02:57.498618 74928 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0213 23:02:57.498637 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:57.503479 74928 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5dd5756b68-kw9vb" in "kube-system" namespace to be "Ready" ...
I0213 23:02:57.776437 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:57.990216 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:57.994691 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:58.067730 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:58.277049 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:58.490459 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:58.494500 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:58.564373 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:58.776246 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:58.989822 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:58.993568 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:59.000232 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:59.009640 74928 pod_ready.go:92] pod "coredns-5dd5756b68-kw9vb" in "kube-system" namespace has status "Ready":"True"
I0213 23:02:59.009682 74928 pod_ready.go:81] duration metric: took 1.50617685s waiting for pod "coredns-5dd5756b68-kw9vb" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.009715 74928 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.015867 74928 pod_ready.go:92] pod "etcd-addons-913502" in "kube-system" namespace has status "Ready":"True"
I0213 23:02:59.016025 74928 pod_ready.go:81] duration metric: took 6.292931ms waiting for pod "etcd-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.016069 74928 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.021130 74928 pod_ready.go:92] pod "kube-apiserver-addons-913502" in "kube-system" namespace has status "Ready":"True"
I0213 23:02:59.021151 74928 pod_ready.go:81] duration metric: took 5.051562ms waiting for pod "kube-apiserver-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.021161 74928 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.025792 74928 pod_ready.go:92] pod "kube-controller-manager-addons-913502" in "kube-system" namespace has status "Ready":"True"
I0213 23:02:59.025814 74928 pod_ready.go:81] duration metric: took 4.647644ms waiting for pod "kube-controller-manager-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.025825 74928 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-dd5xd" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.096511 74928 pod_ready.go:92] pod "kube-proxy-dd5xd" in "kube-system" namespace has status "Ready":"True"
I0213 23:02:59.096538 74928 pod_ready.go:81] duration metric: took 70.705683ms waiting for pod "kube-proxy-dd5xd" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.096551 74928 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.277144 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:59.489134 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:59.493577 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:59.495254 74928 pod_ready.go:92] pod "kube-scheduler-addons-913502" in "kube-system" namespace has status "Ready":"True"
I0213 23:02:59.495277 74928 pod_ready.go:81] duration metric: took 398.717851ms waiting for pod "kube-scheduler-addons-913502" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.495290 74928 pod_ready.go:78] waiting up to 6m0s for pod "metrics-server-69cf46c98-jv886" in "kube-system" namespace to be "Ready" ...
I0213 23:02:59.498215 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:02:59.776473 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:02:59.989407 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:02:59.993428 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:02:59.999344 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:00.276454 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:00.489459 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:00.493607 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:00.499788 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:00.776214 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:00.988640 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:00.993118 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:00.999265 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:01.276373 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:01.489059 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:01.493873 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:01.499773 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:01.501201 74928 pod_ready.go:102] pod "metrics-server-69cf46c98-jv886" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:01.776629 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:01.990896 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:01.995167 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:01.999268 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:02.276433 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:02.490179 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:02.494417 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:02.499522 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:02.777269 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:02.989030 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:02.992998 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:03.063872 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:03.276863 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:03.488623 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:03.493630 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:03.499581 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:03.776246 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:03.989471 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:03.992793 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:03.998620 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:04.000709 74928 pod_ready.go:102] pod "metrics-server-69cf46c98-jv886" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:04.276802 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:04.489954 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:04.498124 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:04.573570 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:04.779890 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:04.991010 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:04.996262 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:05.063797 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:05.074497 74928 pod_ready.go:92] pod "metrics-server-69cf46c98-jv886" in "kube-system" namespace has status "Ready":"True"
I0213 23:03:05.074524 74928 pod_ready.go:81] duration metric: took 5.579225583s waiting for pod "metrics-server-69cf46c98-jv886" in "kube-system" namespace to be "Ready" ...
I0213 23:03:05.074538 74928 pod_ready.go:78] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace to be "Ready" ...
I0213 23:03:05.276563 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:05.490088 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:05.494009 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:05.499509 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:05.776388 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:05.989429 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:05.992927 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:05.999260 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:06.276516 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:06.489262 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:06.493786 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:06.500276 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:06.777065 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:06.989395 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:06.995048 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:06.998908 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:07.080790 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:07.276118 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:07.489370 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:07.494330 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:07.499432 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:07.775778 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:07.989798 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:07.993149 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:07.999099 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:08.276524 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:08.490020 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:08.493124 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:08.499013 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:08.783920 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:08.989552 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:08.993279 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:09.062772 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:09.081649 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:09.276775 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:09.490224 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:09.494229 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:09.498742 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:09.775923 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:09.988494 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:09.993209 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:09.999395 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:10.275851 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:10.488901 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:10.493997 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:10.499846 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:10.776718 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:10.990162 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:11.064604 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:11.066019 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:11.081911 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:11.277010 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:11.489970 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:11.493571 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:11.499981 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:11.777250 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:11.989224 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:11.993444 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:11.999431 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:12.276192 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:12.490420 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:12.493751 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:12.498823 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:12.776728 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:12.989754 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:12.993655 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:13.000565 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:13.276164 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:13.489285 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:13.493196 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:13.498860 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:13.581546 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:13.777901 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:14.073081 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:14.074353 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:14.075044 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:14.276581 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:14.491424 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:14.562670 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:14.565505 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:14.776000 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:14.990208 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:14.994885 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:15.004998 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:15.275915 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:15.489615 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:15.493465 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:15.499534 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:15.776627 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:15.990303 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:15.994920 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:15.999508 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:16.082357 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:16.276625 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:16.489789 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:16.493925 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:16.499830 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:16.776554 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:16.990502 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:16.994367 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:16.999762 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:17.277285 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:17.489591 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:17.494173 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:17.500170 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:17.777163 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:17.989528 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:17.992916 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:17.998693 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:18.084076 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:18.275964 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:18.489466 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:18.492520 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:18.499761 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:18.775566 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:18.989542 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:18.992909 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:18.999047 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:19.275923 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:19.490371 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:19.494521 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:19.499214 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:19.776217 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:19.991417 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:19.994075 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:19.999328 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:20.276240 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:20.488988 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:20.493622 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:20.499245 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:20.581713 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:20.775853 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:20.989494 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:20.992805 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:20.998636 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0213 23:03:21.276731 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:21.489815 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:21.495782 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:21.499626 74928 kapi.go:107] duration metric: took 51.504843171s to wait for kubernetes.io/minikube-addons=registry ...
I0213 23:03:21.776301 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:21.989424 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:21.992867 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:22.275738 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:22.489608 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:22.493662 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:22.776894 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:22.988976 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:22.997020 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:23.080386 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:23.276801 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:23.492180 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:23.496237 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:23.777153 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:23.989514 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:23.993837 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:24.276715 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:24.490054 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:24.493615 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:24.776523 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:24.989428 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:24.993475 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:25.081038 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:25.276737 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:25.490306 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:25.494154 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:25.776533 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:25.990427 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:25.993572 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:26.276625 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:26.489638 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:26.493258 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:26.777050 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:26.989143 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:26.993942 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:27.081166 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:27.276550 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:27.489536 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:27.492536 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:27.776690 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:27.989478 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:27.992784 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:28.276575 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:28.489131 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:28.492773 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:28.776620 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:28.989419 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:28.992736 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:29.082442 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:29.276004 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:29.489755 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:29.493307 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:29.777364 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:30.080543 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:30.081767 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:30.362272 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:30.491911 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:30.564414 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:30.777338 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:30.989889 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:30.993703 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:31.308308 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:31.492979 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:31.494146 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:31.581931 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:31.775929 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:31.989593 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:31.993989 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:32.276473 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:32.490090 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:32.494609 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:32.777555 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:32.990148 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:32.994441 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:33.276694 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:33.489411 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:33.493097 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:33.776089 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:33.989014 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:33.993222 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:34.081800 74928 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"False"
I0213 23:03:34.275667 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:34.490274 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:34.493636 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:34.776270 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:34.990020 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:34.993233 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:35.276640 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:35.489609 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:35.493145 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:35.581013 74928 pod_ready.go:92] pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace has status "Ready":"True"
I0213 23:03:35.581037 74928 pod_ready.go:81] duration metric: took 30.506491278s waiting for pod "nvidia-device-plugin-daemonset-nwpfp" in "kube-system" namespace to be "Ready" ...
I0213 23:03:35.581055 74928 pod_ready.go:38] duration metric: took 38.085921931s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0213 23:03:35.581075 74928 api_server.go:52] waiting for apiserver process to appear ...
I0213 23:03:35.581147 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0213 23:03:35.581278 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0213 23:03:35.617186 74928 cri.go:89] found id: "8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27"
I0213 23:03:35.617210 74928 cri.go:89] found id: ""
I0213 23:03:35.617220 74928 logs.go:276] 1 containers: [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27]
I0213 23:03:35.617281 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.621317 74928 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0213 23:03:35.621387 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0213 23:03:35.684656 74928 cri.go:89] found id: "01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775"
I0213 23:03:35.684682 74928 cri.go:89] found id: ""
I0213 23:03:35.684693 74928 logs.go:276] 1 containers: [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775]
I0213 23:03:35.684746 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.688029 74928 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0213 23:03:35.688086 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0213 23:03:35.722548 74928 cri.go:89] found id: "dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03"
I0213 23:03:35.722574 74928 cri.go:89] found id: ""
I0213 23:03:35.722583 74928 logs.go:276] 1 containers: [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03]
I0213 23:03:35.722640 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.762552 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0213 23:03:35.762635 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0213 23:03:35.776969 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:35.800502 74928 cri.go:89] found id: "65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6"
I0213 23:03:35.800532 74928 cri.go:89] found id: ""
I0213 23:03:35.800544 74928 logs.go:276] 1 containers: [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6]
I0213 23:03:35.800599 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.804048 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0213 23:03:35.804109 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0213 23:03:35.863719 74928 cri.go:89] found id: "a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f"
I0213 23:03:35.863751 74928 cri.go:89] found id: ""
I0213 23:03:35.863762 74928 logs.go:276] 1 containers: [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f]
I0213 23:03:35.863830 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.867587 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0213 23:03:35.867656 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0213 23:03:35.902274 74928 cri.go:89] found id: "926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4"
I0213 23:03:35.902301 74928 cri.go:89] found id: ""
I0213 23:03:35.902312 74928 logs.go:276] 1 containers: [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4]
I0213 23:03:35.902366 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.905836 74928 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0213 23:03:35.905896 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0213 23:03:35.979706 74928 cri.go:89] found id: "2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f"
I0213 23:03:35.979730 74928 cri.go:89] found id: ""
I0213 23:03:35.979737 74928 logs.go:276] 1 containers: [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f]
I0213 23:03:35.979783 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:35.982995 74928 logs.go:123] Gathering logs for CRI-O ...
I0213 23:03:35.983017 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0213 23:03:35.988345 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:35.992709 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:36.054384 74928 logs.go:123] Gathering logs for etcd [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775] ...
I0213 23:03:36.054423 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775"
I0213 23:03:36.092394 74928 logs.go:123] Gathering logs for kube-scheduler [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6] ...
I0213 23:03:36.092425 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6"
I0213 23:03:36.130467 74928 logs.go:123] Gathering logs for kube-proxy [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f] ...
I0213 23:03:36.130501 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f"
I0213 23:03:36.163074 74928 logs.go:123] Gathering logs for kindnet [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f] ...
I0213 23:03:36.163107 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f"
I0213 23:03:36.198192 74928 logs.go:123] Gathering logs for coredns [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03] ...
I0213 23:03:36.198233 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03"
I0213 23:03:36.244845 74928 logs.go:123] Gathering logs for kube-controller-manager [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4] ...
I0213 23:03:36.244890 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4"
I0213 23:03:36.276588 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:36.301000 74928 logs.go:123] Gathering logs for container status ...
I0213 23:03:36.301041 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0213 23:03:36.341482 74928 logs.go:123] Gathering logs for kubelet ...
I0213 23:03:36.341518 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0213 23:03:36.389644 74928 logs.go:138] Found kubelet problem: Feb 13 23:02:29 addons-913502 kubelet[1554]: W0213 23:02:29.584745 1554 reflector.go:535] object-"gadget"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
W0213 23:03:36.389843 74928 logs.go:138] Found kubelet problem: Feb 13 23:02:29 addons-913502 kubelet[1554]: E0213 23:02:29.584796 1554 reflector.go:147] object-"gadget"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
I0213 23:03:36.421684 74928 logs.go:123] Gathering logs for dmesg ...
I0213 23:03:36.421726 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0213 23:03:36.435954 74928 logs.go:123] Gathering logs for describe nodes ...
I0213 23:03:36.435985 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0213 23:03:36.489938 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:36.494188 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:36.539605 74928 logs.go:123] Gathering logs for kube-apiserver [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27] ...
I0213 23:03:36.539642 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27"
I0213 23:03:36.586543 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:03:36.586586 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
W0213 23:03:36.586663 74928 out.go:239] X Problems detected in kubelet:
W0213 23:03:36.586679 74928 out.go:239] Feb 13 23:02:29 addons-913502 kubelet[1554]: W0213 23:02:29.584745 1554 reflector.go:535] object-"gadget"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
W0213 23:03:36.586691 74928 out.go:239] Feb 13 23:02:29 addons-913502 kubelet[1554]: E0213 23:02:29.584796 1554 reflector.go:147] object-"gadget"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
I0213 23:03:36.586708 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:03:36.586762 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0213 23:03:36.776621 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:36.989572 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:36.993091 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:37.276421 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:37.489710 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:37.492883 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:37.777172 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:37.989549 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:37.992840 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:38.276607 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:38.489509 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:38.493699 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:38.775985 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0213 23:03:38.989519 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:38.993401 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:39.277443 74928 kapi.go:107] duration metric: took 1m7.505036996s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0213 23:03:39.279241 74928 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-913502 cluster.
I0213 23:03:39.280680 74928 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0213 23:03:39.282325 74928 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
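For reference, a minimal pod-manifest sketch of the opt-out the gcp-auth messages above describe. Only the label key `gcp-auth-skip-secret` comes from the log; the pod name, image, and the "true" value are illustrative assumptions, not part of this test run:

    apiVersion: v1
    kind: Pod
    metadata:
      name: example-no-gcp-auth        # hypothetical name for the sketch
      labels:
        gcp-auth-skip-secret: "true"   # label key from the addon message above; value assumed
    spec:
      containers:
      - name: app
        image: nginx                    # placeholder image, not from the test

With this label present, the gcp-auth admission webhook would be expected to leave the pod unmutated, so the credentials secret is not mounted into it.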
I0213 23:03:39.490640 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:39.493505 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:39.989163 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:39.993580 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:40.573519 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:40.575385 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:40.989337 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:41.077821 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:41.567019 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:41.567603 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:41.989975 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:42.065246 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:42.489447 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:42.493849 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:42.989720 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:42.993375 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:43.489699 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:43.493378 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:43.989488 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:43.993191 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:44.489277 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:44.494104 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:44.989106 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:44.994890 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:45.489629 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:45.493461 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:45.988693 74928 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0213 23:03:45.993714 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:46.489926 74928 kapi.go:107] duration metric: took 1m16.505286816s to wait for app.kubernetes.io/name=ingress-nginx ...
I0213 23:03:46.493301 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:46.587982 74928 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0213 23:03:46.601144 74928 api_server.go:72] duration metric: took 1m23.036993681s to wait for apiserver process to appear ...
I0213 23:03:46.601179 74928 api_server.go:88] waiting for apiserver healthz status ...
I0213 23:03:46.601221 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0213 23:03:46.601273 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0213 23:03:46.635621 74928 cri.go:89] found id: "8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27"
I0213 23:03:46.635654 74928 cri.go:89] found id: ""
I0213 23:03:46.635682 74928 logs.go:276] 1 containers: [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27]
I0213 23:03:46.635744 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:46.639090 74928 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0213 23:03:46.639147 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0213 23:03:46.675663 74928 cri.go:89] found id: "01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775"
I0213 23:03:46.675694 74928 cri.go:89] found id: ""
I0213 23:03:46.675704 74928 logs.go:276] 1 containers: [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775]
I0213 23:03:46.675764 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:46.679390 74928 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0213 23:03:46.679470 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0213 23:03:46.767335 74928 cri.go:89] found id: "dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03"
I0213 23:03:46.767369 74928 cri.go:89] found id: ""
I0213 23:03:46.767380 74928 logs.go:276] 1 containers: [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03]
I0213 23:03:46.767439 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:46.770887 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0213 23:03:46.770968 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0213 23:03:46.808198 74928 cri.go:89] found id: "65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6"
I0213 23:03:46.808232 74928 cri.go:89] found id: ""
I0213 23:03:46.808244 74928 logs.go:276] 1 containers: [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6]
I0213 23:03:46.808308 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:46.812865 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0213 23:03:46.812934 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0213 23:03:46.889497 74928 cri.go:89] found id: "a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f"
I0213 23:03:46.889523 74928 cri.go:89] found id: ""
I0213 23:03:46.889534 74928 logs.go:276] 1 containers: [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f]
I0213 23:03:46.889597 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:46.893092 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0213 23:03:46.893171 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0213 23:03:47.061996 74928 cri.go:89] found id: "926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4"
I0213 23:03:47.062026 74928 cri.go:89] found id: ""
I0213 23:03:47.062037 74928 logs.go:276] 1 containers: [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4]
I0213 23:03:47.062094 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:47.067232 74928 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0213 23:03:47.067305 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0213 23:03:47.068506 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:47.104504 74928 cri.go:89] found id: "2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f"
I0213 23:03:47.104534 74928 cri.go:89] found id: ""
I0213 23:03:47.104545 74928 logs.go:276] 1 containers: [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f]
I0213 23:03:47.104681 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:47.161685 74928 logs.go:123] Gathering logs for etcd [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775] ...
I0213 23:03:47.161789 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775"
I0213 23:03:47.277306 74928 logs.go:123] Gathering logs for coredns [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03] ...
I0213 23:03:47.277343 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03"
I0213 23:03:47.330898 74928 logs.go:123] Gathering logs for kube-controller-manager [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4] ...
I0213 23:03:47.330949 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4"
I0213 23:03:47.427398 74928 logs.go:123] Gathering logs for CRI-O ...
I0213 23:03:47.427439 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0213 23:03:47.493781 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:47.534906 74928 logs.go:123] Gathering logs for kubelet ...
I0213 23:03:47.534951 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0213 23:03:47.597132 74928 logs.go:138] Found kubelet problem: Feb 13 23:02:29 addons-913502 kubelet[1554]: W0213 23:02:29.584745 1554 reflector.go:535] object-"gadget"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
W0213 23:03:47.597395 74928 logs.go:138] Found kubelet problem: Feb 13 23:02:29 addons-913502 kubelet[1554]: E0213 23:02:29.584796 1554 reflector.go:147] object-"gadget"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
I0213 23:03:47.633132 74928 logs.go:123] Gathering logs for describe nodes ...
I0213 23:03:47.633174 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0213 23:03:47.736670 74928 logs.go:123] Gathering logs for kube-scheduler [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6] ...
I0213 23:03:47.736708 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6"
I0213 23:03:47.776447 74928 logs.go:123] Gathering logs for kube-proxy [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f] ...
I0213 23:03:47.776496 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f"
I0213 23:03:47.815312 74928 logs.go:123] Gathering logs for kindnet [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f] ...
I0213 23:03:47.815351 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f"
I0213 23:03:47.867670 74928 logs.go:123] Gathering logs for container status ...
I0213 23:03:47.867703 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0213 23:03:47.914077 74928 logs.go:123] Gathering logs for dmesg ...
I0213 23:03:47.914124 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0213 23:03:47.929822 74928 logs.go:123] Gathering logs for kube-apiserver [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27] ...
I0213 23:03:47.929861 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27"
I0213 23:03:47.979183 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:03:47.979225 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
W0213 23:03:47.979306 74928 out.go:239] X Problems detected in kubelet:
W0213 23:03:47.979319 74928 out.go:239] Feb 13 23:02:29 addons-913502 kubelet[1554]: W0213 23:02:29.584745 1554 reflector.go:535] object-"gadget"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
W0213 23:03:47.979332 74928 out.go:239] Feb 13 23:02:29 addons-913502 kubelet[1554]: E0213 23:02:29.584796 1554 reflector.go:147] object-"gadget"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
I0213 23:03:47.979349 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:03:47.979359 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0213 23:03:47.993988 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:48.494305 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:48.994483 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:49.494435 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:49.994370 74928 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0213 23:03:50.493478 74928 kapi.go:107] duration metric: took 1m19.505403701s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0213 23:03:50.495818 74928 out.go:177] * Enabled addons: inspektor-gadget, ingress-dns, helm-tiller, metrics-server, storage-provisioner, cloud-spanner, nvidia-device-plugin, yakd, default-storageclass, volumesnapshots, registry, gcp-auth, ingress, csi-hostpath-driver
I0213 23:03:50.497400 74928 addons.go:505] enable addons completed in 1m27.513926194s: enabled=[inspektor-gadget ingress-dns helm-tiller metrics-server storage-provisioner cloud-spanner nvidia-device-plugin yakd default-storageclass volumesnapshots registry gcp-auth ingress csi-hostpath-driver]
I0213 23:03:57.979582 74928 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0213 23:03:57.984099 74928 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0213 23:03:57.985407 74928 api_server.go:141] control plane version: v1.28.4
I0213 23:03:57.985438 74928 api_server.go:131] duration metric: took 11.38425193s to wait for apiserver health ...
I0213 23:03:57.985447 74928 system_pods.go:43] waiting for kube-system pods to appear ...
I0213 23:03:57.985471 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0213 23:03:57.985531 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0213 23:03:58.019038 74928 cri.go:89] found id: "8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27"
I0213 23:03:58.019066 74928 cri.go:89] found id: ""
I0213 23:03:58.019075 74928 logs.go:276] 1 containers: [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27]
I0213 23:03:58.019121 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.022558 74928 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0213 23:03:58.022622 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0213 23:03:58.055009 74928 cri.go:89] found id: "01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775"
I0213 23:03:58.055036 74928 cri.go:89] found id: ""
I0213 23:03:58.055045 74928 logs.go:276] 1 containers: [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775]
I0213 23:03:58.055091 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.058387 74928 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0213 23:03:58.058440 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0213 23:03:58.094250 74928 cri.go:89] found id: "dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03"
I0213 23:03:58.094277 74928 cri.go:89] found id: ""
I0213 23:03:58.094287 74928 logs.go:276] 1 containers: [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03]
I0213 23:03:58.094344 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.097864 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0213 23:03:58.097924 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0213 23:03:58.130542 74928 cri.go:89] found id: "65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6"
I0213 23:03:58.130571 74928 cri.go:89] found id: ""
I0213 23:03:58.130582 74928 logs.go:276] 1 containers: [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6]
I0213 23:03:58.130630 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.134062 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0213 23:03:58.134130 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0213 23:03:58.167942 74928 cri.go:89] found id: "a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f"
I0213 23:03:58.167974 74928 cri.go:89] found id: ""
I0213 23:03:58.167988 74928 logs.go:276] 1 containers: [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f]
I0213 23:03:58.168051 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.171436 74928 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0213 23:03:58.171490 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0213 23:03:58.204459 74928 cri.go:89] found id: "926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4"
I0213 23:03:58.204485 74928 cri.go:89] found id: ""
I0213 23:03:58.204493 74928 logs.go:276] 1 containers: [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4]
I0213 23:03:58.204542 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.207790 74928 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0213 23:03:58.207857 74928 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0213 23:03:58.240251 74928 cri.go:89] found id: "2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f"
I0213 23:03:58.240279 74928 cri.go:89] found id: ""
I0213 23:03:58.240288 74928 logs.go:276] 1 containers: [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f]
I0213 23:03:58.240362 74928 ssh_runner.go:195] Run: which crictl
I0213 23:03:58.243623 74928 logs.go:123] Gathering logs for kubelet ...
I0213 23:03:58.243645 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
W0213 23:03:58.292090 74928 logs.go:138] Found kubelet problem: Feb 13 23:02:29 addons-913502 kubelet[1554]: W0213 23:02:29.584745 1554 reflector.go:535] object-"gadget"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
W0213 23:03:58.292256 74928 logs.go:138] Found kubelet problem: Feb 13 23:02:29 addons-913502 kubelet[1554]: E0213 23:02:29.584796 1554 reflector.go:147] object-"gadget"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
I0213 23:03:58.326318 74928 logs.go:123] Gathering logs for coredns [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03] ...
I0213 23:03:58.326354 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03"
I0213 23:03:58.372141 74928 logs.go:123] Gathering logs for kube-scheduler [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6] ...
I0213 23:03:58.372174 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6"
I0213 23:03:58.409898 74928 logs.go:123] Gathering logs for kube-controller-manager [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4] ...
I0213 23:03:58.409931 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4"
I0213 23:03:58.467275 74928 logs.go:123] Gathering logs for kindnet [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f] ...
I0213 23:03:58.467318 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f"
I0213 23:03:58.500052 74928 logs.go:123] Gathering logs for CRI-O ...
I0213 23:03:58.500084 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0213 23:03:58.568626 74928 logs.go:123] Gathering logs for container status ...
I0213 23:03:58.568667 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0213 23:03:58.614583 74928 logs.go:123] Gathering logs for dmesg ...
I0213 23:03:58.614623 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0213 23:03:58.629254 74928 logs.go:123] Gathering logs for describe nodes ...
I0213 23:03:58.629290 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0213 23:03:58.732800 74928 logs.go:123] Gathering logs for kube-apiserver [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27] ...
I0213 23:03:58.732840 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27"
I0213 23:03:58.778150 74928 logs.go:123] Gathering logs for etcd [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775] ...
I0213 23:03:58.778189 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775"
I0213 23:03:58.817869 74928 logs.go:123] Gathering logs for kube-proxy [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f] ...
I0213 23:03:58.868520 74928 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f"
I0213 23:03:58.902761 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:03:58.902788 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
W0213 23:03:58.902840 74928 out.go:239] X Problems detected in kubelet:
W0213 23:03:58.902856 74928 out.go:239] Feb 13 23:02:29 addons-913502 kubelet[1554]: W0213 23:02:29.584745 1554 reflector.go:535] object-"gadget"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
W0213 23:03:58.902868 74928 out.go:239] Feb 13 23:02:29 addons-913502 kubelet[1554]: E0213 23:02:29.584796 1554 reflector.go:147] object-"gadget"/"kube-root-ca.crt": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:addons-913502" cannot list resource "configmaps" in API group "" in the namespace "gadget": no relationship found between node 'addons-913502' and this object
I0213 23:03:58.902876 74928 out.go:304] Setting ErrFile to fd 2...
I0213 23:03:58.902886 74928 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0213 23:04:08.913088 74928 system_pods.go:59] 19 kube-system pods found
I0213 23:04:08.913145 74928 system_pods.go:61] "coredns-5dd5756b68-kw9vb" [a45ecc94-1a7d-4b30-b9a1-4b013b7d85b6] Running
I0213 23:04:08.913157 74928 system_pods.go:61] "csi-hostpath-attacher-0" [333cc586-eff2-4cf0-8661-dc7965e732c2] Running
I0213 23:04:08.913164 74928 system_pods.go:61] "csi-hostpath-resizer-0" [3ff50dd0-ac61-42cd-bd4e-22c24a151d0b] Running
I0213 23:04:08.913170 74928 system_pods.go:61] "csi-hostpathplugin-xhbgc" [3fa876b5-9f44-4de9-962e-81019a9b2450] Running
I0213 23:04:08.913177 74928 system_pods.go:61] "etcd-addons-913502" [16dd7c79-5009-4cf9-903d-3578ea264a77] Running
I0213 23:04:08.913182 74928 system_pods.go:61] "kindnet-x9mvr" [1d27f1a1-ead6-417c-a8ed-95aa215acc35] Running
I0213 23:04:08.913199 74928 system_pods.go:61] "kube-apiserver-addons-913502" [035e932c-9b8c-4b51-8c74-d4d18bb66b65] Running
I0213 23:04:08.913204 74928 system_pods.go:61] "kube-controller-manager-addons-913502" [a6e43d1f-af03-4c52-8f75-1758daee7a5a] Running
I0213 23:04:08.913212 74928 system_pods.go:61] "kube-ingress-dns-minikube" [463f7dd2-ee46-408c-8790-4a7318e64279] Running
I0213 23:04:08.913218 74928 system_pods.go:61] "kube-proxy-dd5xd" [89f747ec-71d3-403d-85f7-68278485ca5f] Running
I0213 23:04:08.913222 74928 system_pods.go:61] "kube-scheduler-addons-913502" [69f21cdb-2661-48b6-905c-88847a110480] Running
I0213 23:04:08.913229 74928 system_pods.go:61] "metrics-server-69cf46c98-jv886" [0721b3c3-1074-430f-8fcf-1a0a987218e0] Running
I0213 23:04:08.913235 74928 system_pods.go:61] "nvidia-device-plugin-daemonset-nwpfp" [61a6a604-82d9-4f32-9be6-9b58ec3b2930] Running
I0213 23:04:08.913242 74928 system_pods.go:61] "registry-proxy-6fcvz" [fd7e7f0f-51ba-46ce-8a59-f33819b6a633] Running
I0213 23:04:08.913246 74928 system_pods.go:61] "registry-zd97h" [4c64ca96-e524-479f-b3b1-8e37e19bf37e] Running
I0213 23:04:08.913250 74928 system_pods.go:61] "snapshot-controller-58dbcc7b99-8gqrl" [64679169-f689-4c5d-b89e-dbb3e7095f26] Running
I0213 23:04:08.913256 74928 system_pods.go:61] "snapshot-controller-58dbcc7b99-k45hw" [7b5af714-4b23-43e2-9b68-0a03e729a563] Running
I0213 23:04:08.913260 74928 system_pods.go:61] "storage-provisioner" [32af8c0c-00a1-4f92-afdb-fffc86fe3219] Running
I0213 23:04:08.913265 74928 system_pods.go:61] "tiller-deploy-7b677967b9-hd46l" [fe48a6c2-5ee5-4f2e-afc7-bdf16742cbfe] Running
I0213 23:04:08.913274 74928 system_pods.go:74] duration metric: took 10.927820338s to wait for pod list to return data ...
I0213 23:04:08.913284 74928 default_sa.go:34] waiting for default service account to be created ...
I0213 23:04:08.915405 74928 default_sa.go:45] found service account: "default"
I0213 23:04:08.915430 74928 default_sa.go:55] duration metric: took 2.136757ms for default service account to be created ...
I0213 23:04:08.915441 74928 system_pods.go:116] waiting for k8s-apps to be running ...
I0213 23:04:08.923119 74928 system_pods.go:86] 19 kube-system pods found
I0213 23:04:08.923153 74928 system_pods.go:89] "coredns-5dd5756b68-kw9vb" [a45ecc94-1a7d-4b30-b9a1-4b013b7d85b6] Running
I0213 23:04:08.923162 74928 system_pods.go:89] "csi-hostpath-attacher-0" [333cc586-eff2-4cf0-8661-dc7965e732c2] Running
I0213 23:04:08.923168 74928 system_pods.go:89] "csi-hostpath-resizer-0" [3ff50dd0-ac61-42cd-bd4e-22c24a151d0b] Running
I0213 23:04:08.923174 74928 system_pods.go:89] "csi-hostpathplugin-xhbgc" [3fa876b5-9f44-4de9-962e-81019a9b2450] Running
I0213 23:04:08.923179 74928 system_pods.go:89] "etcd-addons-913502" [16dd7c79-5009-4cf9-903d-3578ea264a77] Running
I0213 23:04:08.923184 74928 system_pods.go:89] "kindnet-x9mvr" [1d27f1a1-ead6-417c-a8ed-95aa215acc35] Running
I0213 23:04:08.923191 74928 system_pods.go:89] "kube-apiserver-addons-913502" [035e932c-9b8c-4b51-8c74-d4d18bb66b65] Running
I0213 23:04:08.923203 74928 system_pods.go:89] "kube-controller-manager-addons-913502" [a6e43d1f-af03-4c52-8f75-1758daee7a5a] Running
I0213 23:04:08.923211 74928 system_pods.go:89] "kube-ingress-dns-minikube" [463f7dd2-ee46-408c-8790-4a7318e64279] Running
I0213 23:04:08.923217 74928 system_pods.go:89] "kube-proxy-dd5xd" [89f747ec-71d3-403d-85f7-68278485ca5f] Running
I0213 23:04:08.923227 74928 system_pods.go:89] "kube-scheduler-addons-913502" [69f21cdb-2661-48b6-905c-88847a110480] Running
I0213 23:04:08.923235 74928 system_pods.go:89] "metrics-server-69cf46c98-jv886" [0721b3c3-1074-430f-8fcf-1a0a987218e0] Running
I0213 23:04:08.923243 74928 system_pods.go:89] "nvidia-device-plugin-daemonset-nwpfp" [61a6a604-82d9-4f32-9be6-9b58ec3b2930] Running
I0213 23:04:08.923260 74928 system_pods.go:89] "registry-proxy-6fcvz" [fd7e7f0f-51ba-46ce-8a59-f33819b6a633] Running
I0213 23:04:08.923266 74928 system_pods.go:89] "registry-zd97h" [4c64ca96-e524-479f-b3b1-8e37e19bf37e] Running
I0213 23:04:08.923275 74928 system_pods.go:89] "snapshot-controller-58dbcc7b99-8gqrl" [64679169-f689-4c5d-b89e-dbb3e7095f26] Running
I0213 23:04:08.923285 74928 system_pods.go:89] "snapshot-controller-58dbcc7b99-k45hw" [7b5af714-4b23-43e2-9b68-0a03e729a563] Running
I0213 23:04:08.923293 74928 system_pods.go:89] "storage-provisioner" [32af8c0c-00a1-4f92-afdb-fffc86fe3219] Running
I0213 23:04:08.923303 74928 system_pods.go:89] "tiller-deploy-7b677967b9-hd46l" [fe48a6c2-5ee5-4f2e-afc7-bdf16742cbfe] Running
I0213 23:04:08.923314 74928 system_pods.go:126] duration metric: took 7.865029ms to wait for k8s-apps to be running ...
I0213 23:04:08.923327 74928 system_svc.go:44] waiting for kubelet service to be running ....
I0213 23:04:08.923416 74928 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0213 23:04:08.934795 74928 system_svc.go:56] duration metric: took 11.45976ms WaitForService to wait for kubelet.
I0213 23:04:08.934822 74928 kubeadm.go:581] duration metric: took 1m45.370680443s to wait for : map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] ...
I0213 23:04:08.934844 74928 node_conditions.go:102] verifying NodePressure condition ...
I0213 23:04:08.937589 74928 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0213 23:04:08.937618 74928 node_conditions.go:123] node cpu capacity is 8
I0213 23:04:08.937629 74928 node_conditions.go:105] duration metric: took 2.781014ms to run NodePressure ...
I0213 23:04:08.937641 74928 start.go:228] waiting for startup goroutines ...
I0213 23:04:08.937647 74928 start.go:233] waiting for cluster config update ...
I0213 23:04:08.937662 74928 start.go:242] writing updated cluster config ...
I0213 23:04:08.937959 74928 ssh_runner.go:195] Run: rm -f paused
I0213 23:04:08.986987 74928 start.go:600] kubectl: 1.29.1, cluster: 1.28.4 (minor skew: 1)
I0213 23:04:08.989418 74928 out.go:177] * Done! kubectl is now configured to use "addons-913502" cluster and "default" namespace by default
==> CRI-O <==
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.328263310Z" level=info msg="Pulled image: gcr.io/google-samples/hello-app@sha256:b1455e1c4fcc5ea1023c9e3b584cd84b64eb920e332feff690a2829696e379e7" id=a59548cc-42a0-49ef-8bbe-7a8caed4df5c name=/runtime.v1.ImageService/PullImage
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.329098705Z" level=info msg="Checking image status: gcr.io/google-samples/hello-app:1.0" id=0cead669-f2dd-4d1f-85cd-38bcd0e0c5c8 name=/runtime.v1.ImageService/ImageStatus
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.330330922Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:dd1b12fcb60978ac32686ef6732d56f612c8636ef86693c09613946a54c69d79,RepoTags:[gcr.io/google-samples/hello-app:1.0],RepoDigests:[gcr.io/google-samples/hello-app@sha256:b1455e1c4fcc5ea1023c9e3b584cd84b64eb920e332feff690a2829696e379e7],Size_:28999827,Uid:nil,Username:nonroot,Spec:nil,},Info:map[string]string{},}" id=0cead669-f2dd-4d1f-85cd-38bcd0e0c5c8 name=/runtime.v1.ImageService/ImageStatus
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.331186877Z" level=info msg="Creating container: default/hello-world-app-5d77478584-q55k8/hello-world-app" id=1cda602c-c67e-479a-8130-b081f087fbf4 name=/runtime.v1.RuntimeService/CreateContainer
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.331285061Z" level=warning msg="Allowed annotations are specified for workload []"
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.381610857Z" level=info msg="Created container 0844f11527e57a4d41dd7e530c599f0d746590bda0cba4204d1f9b7730499562: default/hello-world-app-5d77478584-q55k8/hello-world-app" id=1cda602c-c67e-479a-8130-b081f087fbf4 name=/runtime.v1.RuntimeService/CreateContainer
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.382260728Z" level=info msg="Starting container: 0844f11527e57a4d41dd7e530c599f0d746590bda0cba4204d1f9b7730499562" id=772eb565-6db0-4a53-bc11-07157eed993c name=/runtime.v1.RuntimeService/StartContainer
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.389452019Z" level=info msg="Started container" PID=10913 containerID=0844f11527e57a4d41dd7e530c599f0d746590bda0cba4204d1f9b7730499562 description=default/hello-world-app-5d77478584-q55k8/hello-world-app id=772eb565-6db0-4a53-bc11-07157eed993c name=/runtime.v1.RuntimeService/StartContainer sandboxID=2dd7bb311ccf265e922e2314a30736bc7310d7af34b9382021487be5ad03a406
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.922200033Z" level=info msg="Removing container: f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066" id=5a3d375f-4452-47e8-b22b-671c9f35a804 name=/runtime.v1.RuntimeService/RemoveContainer
Feb 13 23:06:49 addons-913502 crio[948]: time="2024-02-13 23:06:49.939015243Z" level=info msg="Removed container f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066: kube-system/kube-ingress-dns-minikube/minikube-ingress-dns" id=5a3d375f-4452-47e8-b22b-671c9f35a804 name=/runtime.v1.RuntimeService/RemoveContainer
Feb 13 23:06:51 addons-913502 crio[948]: time="2024-02-13 23:06:51.486648219Z" level=info msg="Stopping container: a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2 (timeout: 2s)" id=ece95574-7b2a-4af7-a0f7-22841695ee13 name=/runtime.v1.RuntimeService/StopContainer
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.492232795Z" level=warning msg="Stopping container a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2 with stop signal timed out: timeout reached after 2 seconds waiting for container process to exit" id=ece95574-7b2a-4af7-a0f7-22841695ee13 name=/runtime.v1.RuntimeService/StopContainer
Feb 13 23:06:53 addons-913502 conmon[6196]: conmon a7dd59c495e2f1fac800 <ninfo>: container 6208 exited with status 137
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.624460635Z" level=info msg="Stopped container a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2: ingress-nginx/ingress-nginx-controller-69cff4fd79-blkwl/controller" id=ece95574-7b2a-4af7-a0f7-22841695ee13 name=/runtime.v1.RuntimeService/StopContainer
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.624980489Z" level=info msg="Stopping pod sandbox: 0c903e671316fc05e2d42189304ff05aec225d93d99f132077b88000aa72c70c" id=24a657c2-3dee-4ed1-b286-7e0965707daa name=/runtime.v1.RuntimeService/StopPodSandbox
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.628052725Z" level=info msg="Restoring iptables rules: *nat\n:KUBE-HP-NXLUJ6JIGZYGBJMG - [0:0]\n:KUBE-HOSTPORTS - [0:0]\n:KUBE-HP-65SHGNOS7R6GYCCA - [0:0]\n-X KUBE-HP-NXLUJ6JIGZYGBJMG\n-X KUBE-HP-65SHGNOS7R6GYCCA\nCOMMIT\n"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.629450509Z" level=info msg="Closing host port tcp:80"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.629497752Z" level=info msg="Closing host port tcp:443"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.630958263Z" level=info msg="Host port tcp:80 does not have an open socket"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.630980234Z" level=info msg="Host port tcp:443 does not have an open socket"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.631116121Z" level=info msg="Got pod network &{Name:ingress-nginx-controller-69cff4fd79-blkwl Namespace:ingress-nginx ID:0c903e671316fc05e2d42189304ff05aec225d93d99f132077b88000aa72c70c UID:5185a108-4307-4b54-a0fe-4895307c6b78 NetNS:/var/run/netns/31584504-0f21-4747-8510-8ec5e6ec1cd1 Networks:[{Name:kindnet Ifname:eth0}] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.631229829Z" level=info msg="Deleting pod ingress-nginx_ingress-nginx-controller-69cff4fd79-blkwl from CNI network \"kindnet\" (type=ptp)"
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.665832084Z" level=info msg="Stopped pod sandbox: 0c903e671316fc05e2d42189304ff05aec225d93d99f132077b88000aa72c70c" id=24a657c2-3dee-4ed1-b286-7e0965707daa name=/runtime.v1.RuntimeService/StopPodSandbox
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.934981391Z" level=info msg="Removing container: a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2" id=013344e5-4a30-426d-8462-0f5ae53d919e name=/runtime.v1.RuntimeService/RemoveContainer
Feb 13 23:06:53 addons-913502 crio[948]: time="2024-02-13 23:06:53.950398715Z" level=info msg="Removed container a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2: ingress-nginx/ingress-nginx-controller-69cff4fd79-blkwl/controller" id=013344e5-4a30-426d-8462-0f5ae53d919e name=/runtime.v1.RuntimeService/RemoveContainer
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
0844f11527e57 gcr.io/google-samples/hello-app@sha256:b1455e1c4fcc5ea1023c9e3b584cd84b64eb920e332feff690a2829696e379e7 9 seconds ago Running hello-world-app 0 2dd7bb311ccf2 hello-world-app-5d77478584-q55k8
095cfc48c0d80 docker.io/library/nginx@sha256:b1cfc4e0e01b4dceca3265fd4ca97921569fca1a10919639bedfa8dad9127027 2 minutes ago Running nginx 0 18f1ca5bfe868 nginx
32909e02df9ec ghcr.io/headlamp-k8s/headlamp@sha256:3c6da859a989f285b2fd2ac2f4763d1884d54a51e4405301e5324e0b2b70bd67 2 minutes ago Running headlamp 0 75f2d2119d62e headlamp-7ddfbb94ff-gfh4d
a53887e924cf1 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:3e92b3d1c15220ae0f2f3505fb3a88899a1e48ec85fb777a1a4945ae9db2ce06 3 minutes ago Running gcp-auth 0 14194a907aeeb gcp-auth-d4c87556c-5lpdw
f6b040ce18e80 1ebff0f9671bc015dc340b12c5bf6f3dbda7d0a8b5332bd095f21bd52e1b30fb 3 minutes ago Exited patch 2 cccf7a8b2e9d9 ingress-nginx-admission-patch-52sbr
c02134124c971 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:29318c6957228dc10feb67fed5b91bdd8a9e3279e5b29c5965b9bd31a01ee385 3 minutes ago Exited create 0 f4d0e632b4f12 ingress-nginx-admission-create-hrb59
1042f95db46f4 docker.io/marcnuri/yakd@sha256:a3f540278e4c11373e15605311851dd9c64d208f4d63e727bccc0e39f9329310 3 minutes ago Running yakd 0 c6d7191ac8f67 yakd-dashboard-9947fc6bf-rg866
abbb6c7e78ab4 docker.io/rancher/local-path-provisioner@sha256:73f712e7af12b06720c35ce75217f904f00e4bd96de79f8db1cf160112e667ef 3 minutes ago Running local-path-provisioner 0 d19bcbe2a3aea local-path-provisioner-78b46b4d5c-dnjvv
dfc72942996f7 ead0a4a53df89fd173874b46093b6e62d8c72967bbf606d672c9e8c9b601a4fc 4 minutes ago Running coredns 0 64387d8b4ea34 coredns-5dd5756b68-kw9vb
33a1dd0fd8243 6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562 4 minutes ago Running storage-provisioner 0 1e0837aee4791 storage-provisioner
2c7ce000c4787 c7d1297425461d3e24fe0ba658818593be65d13a2dd45a4c02d8768d6c8c18cc 4 minutes ago Running kindnet-cni 0 47f612c7d026e kindnet-x9mvr
a532fb0a6d4f4 83f6cc407eed88d214aad97f3539bde5c8e485ff14424cd021a3a2899304398e 4 minutes ago Running kube-proxy 0 91a5992b2daca kube-proxy-dd5xd
01eb55b382020 73deb9a3f702532592a4167455f8bf2e5f5d900bcc959ba2fd2d35c321de1af9 4 minutes ago Running etcd 0 3dfeeb690a198 etcd-addons-913502
926999f79520b d058aa5ab969ce7b84d25e7188be1f80633b18db8ea7d02d9d0a78e676236591 4 minutes ago Running kube-controller-manager 0 fa97521f32099 kube-controller-manager-addons-913502
65dbb4b3290b8 e3db313c6dbc065d4ac3b32c7a6f2a878949031b881d217b63881a109c5cfba1 4 minutes ago Running kube-scheduler 0 d086b3c8e8f26 kube-scheduler-addons-913502
8147d14fedd8c 7fe0e6f37db33464725e616a12ccc4e36970370005a2b09683a974db6350c257 4 minutes ago Running kube-apiserver 0 963f2a2426be9 kube-apiserver-addons-913502
==> coredns [dfc72942996f7a7b0ebf8ea4591e12da82229d30e2590234eb4f019274e9fa03] <==
[INFO] 10.244.0.5:48935 - 1154 "AAAA IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.0000919s
[INFO] 10.244.0.5:47036 - 59019 "AAAA IN registry.kube-system.svc.cluster.local.us-central1-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.003878437s
[INFO] 10.244.0.5:47036 - 63638 "A IN registry.kube-system.svc.cluster.local.us-central1-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.005672908s
[INFO] 10.244.0.5:60150 - 17903 "A IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.005464711s
[INFO] 10.244.0.5:60150 - 31722 "AAAA IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.005760444s
[INFO] 10.244.0.5:35471 - 18041 "AAAA IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004296577s
[INFO] 10.244.0.5:35471 - 4986 "A IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.006785779s
[INFO] 10.244.0.5:36615 - 8733 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000068443s
[INFO] 10.244.0.5:36615 - 6171 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000110002s
[INFO] 10.244.0.20:41713 - 24232 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000193934s
[INFO] 10.244.0.20:55364 - 47880 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.00023569s
[INFO] 10.244.0.20:48787 - 7014 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000103692s
[INFO] 10.244.0.20:50377 - 18280 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000207898s
[INFO] 10.244.0.20:37006 - 24365 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000117348s
[INFO] 10.244.0.20:34953 - 9016 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000118315s
[INFO] 10.244.0.20:36408 - 49095 "AAAA IN storage.googleapis.com.us-central1-a.c.k8s-minikube.internal. udp 89 false 1232" NXDOMAIN qr,rd,ra 78 0.007237599s
[INFO] 10.244.0.20:45186 - 5417 "A IN storage.googleapis.com.us-central1-a.c.k8s-minikube.internal. udp 89 false 1232" NXDOMAIN qr,rd,ra 78 0.007626588s
[INFO] 10.244.0.20:48534 - 53281 "A IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.006939279s
[INFO] 10.244.0.20:48536 - 61531 "AAAA IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.007955487s
[INFO] 10.244.0.20:56357 - 62279 "A IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005494063s
[INFO] 10.244.0.20:53840 - 12553 "AAAA IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005663663s
[INFO] 10.244.0.20:53775 - 54501 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 648 0.00094504s
[INFO] 10.244.0.20:41867 - 39431 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.000975881s
[INFO] 10.244.0.24:33790 - 2 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000197173s
[INFO] 10.244.0.24:52628 - 3 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000131795s
==> describe nodes <==
Name: addons-913502
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-913502
kubernetes.io/os=linux
minikube.k8s.io/commit=90664111bc55fed26ce3e984eae935c06b114802
minikube.k8s.io/name=addons-913502
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_02_13T23_02_10_0700
minikube.k8s.io/version=v1.32.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-913502
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Tue, 13 Feb 2024 23:02:06 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-913502
AcquireTime: <unset>
RenewTime: Tue, 13 Feb 2024 23:06:56 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Tue, 13 Feb 2024 23:05:12 +0000 Tue, 13 Feb 2024 23:02:05 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Tue, 13 Feb 2024 23:05:12 +0000 Tue, 13 Feb 2024 23:02:05 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Tue, 13 Feb 2024 23:05:12 +0000 Tue, 13 Feb 2024 23:02:05 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Tue, 13 Feb 2024 23:05:12 +0000 Tue, 13 Feb 2024 23:02:57 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-913502
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859360Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859360Ki
pods: 110
System Info:
Machine ID: 99ec04bc261446e2976859579fc20c50
System UUID: 1219682b-0a1a-42f3-8732-01d5f51f0db6
Boot ID: 997a1092-3efa-483b-88f8-21b3b3d49d89
Kernel Version: 5.15.0-1051-gcp
OS Image: Ubuntu 22.04.3 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: cri-o://1.24.6
Kubelet Version: v1.28.4
Kube-Proxy Version: v1.28.4
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (14 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default hello-world-app-5d77478584-q55k8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 10s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m33s
gcp-auth gcp-auth-d4c87556c-5lpdw 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m27s
headlamp headlamp-7ddfbb94ff-gfh4d 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m48s
kube-system coredns-5dd5756b68-kw9vb 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 4m35s
kube-system etcd-addons-913502 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 4m49s
kube-system kindnet-x9mvr 100m (1%) 100m (1%) 50Mi (0%) 50Mi (0%) 4m36s
kube-system kube-apiserver-addons-913502 250m (3%) 0 (0%) 0 (0%) 0 (0%) 4m49s
kube-system kube-controller-manager-addons-913502 200m (2%) 0 (0%) 0 (0%) 0 (0%) 4m49s
kube-system kube-proxy-dd5xd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m36s
kube-system kube-scheduler-addons-913502 100m (1%) 0 (0%) 0 (0%) 0 (0%) 4m49s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m30s
local-path-storage local-path-provisioner-78b46b4d5c-dnjvv 0 (0%) 0 (0%) 0 (0%) 0 (0%) 4m30s
yakd-dashboard yakd-dashboard-9947fc6bf-rg866 0 (0%) 0 (0%) 128Mi (0%) 256Mi (0%) 4m29s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (10%) 100m (1%)
memory 348Mi (1%) 476Mi (1%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 4m30s kube-proxy
Normal Starting 4m55s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 4m55s (x8 over 4m55s) kubelet Node addons-913502 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 4m55s (x8 over 4m55s) kubelet Node addons-913502 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 4m55s (x8 over 4m55s) kubelet Node addons-913502 status is now: NodeHasSufficientPID
Normal Starting 4m49s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 4m49s kubelet Node addons-913502 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 4m49s kubelet Node addons-913502 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 4m49s kubelet Node addons-913502 status is now: NodeHasSufficientPID
Normal RegisteredNode 4m37s node-controller Node addons-913502 event: Registered Node addons-913502 in Controller
Normal NodeReady 4m1s kubelet Node addons-913502 status is now: NodeReady
==> dmesg <==
[ +0.007821] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.003777] platform eisa.0: EISA: Cannot allocate resource for mainboard
[ +0.000809] platform eisa.0: Cannot allocate resource for EISA slot 1
[ +0.000712] platform eisa.0: Cannot allocate resource for EISA slot 2
[ +0.000674] platform eisa.0: Cannot allocate resource for EISA slot 3
[ +0.000695] platform eisa.0: Cannot allocate resource for EISA slot 4
[ +0.000834] platform eisa.0: Cannot allocate resource for EISA slot 5
[ +0.000854] platform eisa.0: Cannot allocate resource for EISA slot 6
[ +0.000008] platform eisa.0: Cannot allocate resource for EISA slot 7
[ +0.001547] platform eisa.0: Cannot allocate resource for EISA slot 8
[ +9.823537] kauditd_printk_skb: 36 callbacks suppressed
[Feb13 23:04] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000008] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
[ +1.002732] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000005] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
[ +2.019801] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000008] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
[ +4.027622] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000007] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
[ +8.191203] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000006] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
[Feb13 23:05] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000006] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
[ +33.276812] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000006] ll header: 00000000: ba 53 96 50 8d 56 a2 88 5c c9 69 2e 08 00
==> etcd [01eb55b3820206495d80eef7ea79010ec8edebedb21356a351659d7ffe72b775] <==
{"level":"info","ts":"2024-02-13T23:02:04.485939Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-02-13T23:02:04.485999Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-02-13T23:02:04.486067Z","caller":"embed/etcd.go:278","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2024-02-13T23:02:04.486102Z","caller":"embed/etcd.go:855","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2024-02-13T23:02:05.068363Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2024-02-13T23:02:05.068439Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-02-13T23:02:05.068457Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-02-13T23:02:05.068482Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-02-13T23:02:05.06849Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-02-13T23:02:05.068502Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-02-13T23:02:05.068512Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-02-13T23:02:05.069532Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-02-13T23:02:05.070208Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-02-13T23:02:05.070211Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-913502 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-02-13T23:02:05.070265Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-02-13T23:02:05.070439Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-02-13T23:02:05.070462Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-02-13T23:02:05.070606Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-02-13T23:02:05.070691Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-02-13T23:02:05.07075Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-02-13T23:02:05.071444Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-02-13T23:02:05.071582Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-02-13T23:02:25.874217Z","caller":"traceutil/trace.go:171","msg":"trace[772959111] transaction","detail":"{read_only:false; response_revision:421; number_of_response:1; }","duration":"194.823819ms","start":"2024-02-13T23:02:25.679362Z","end":"2024-02-13T23:02:25.874186Z","steps":["trace[772959111] 'process raft request' (duration: 191.406014ms)"],"step_count":1}
{"level":"info","ts":"2024-02-13T23:02:25.874886Z","caller":"traceutil/trace.go:171","msg":"trace[1698636606] transaction","detail":"{read_only:false; response_revision:422; number_of_response:1; }","duration":"195.106121ms","start":"2024-02-13T23:02:25.679765Z","end":"2024-02-13T23:02:25.874871Z","steps":["trace[1698636606] 'process raft request' (duration: 194.332479ms)"],"step_count":1}
{"level":"info","ts":"2024-02-13T23:02:26.766289Z","caller":"traceutil/trace.go:171","msg":"trace[326315272] transaction","detail":"{read_only:false; response_revision:431; number_of_response:1; }","duration":"100.382619ms","start":"2024-02-13T23:02:26.665875Z","end":"2024-02-13T23:02:26.766257Z","steps":["trace[326315272] 'process raft request' (duration: 14.829789ms)","trace[326315272] 'compare' (duration: 85.456877ms)"],"step_count":2}
==> gcp-auth [a53887e924cf1cf348955dda27f4e348f8991ff3c1507b2306bd849cd4c42bc2] <==
2024/02/13 23:03:38 GCP Auth Webhook started!
2024/02/13 23:04:10 Ready to marshal response ...
2024/02/13 23:04:10 Ready to write response ...
2024/02/13 23:04:10 Ready to marshal response ...
2024/02/13 23:04:10 Ready to write response ...
2024/02/13 23:04:10 Ready to marshal response ...
2024/02/13 23:04:10 Ready to write response ...
2024/02/13 23:04:14 Ready to marshal response ...
2024/02/13 23:04:14 Ready to write response ...
2024/02/13 23:04:20 Ready to marshal response ...
2024/02/13 23:04:20 Ready to write response ...
2024/02/13 23:04:21 Ready to marshal response ...
2024/02/13 23:04:21 Ready to write response ...
2024/02/13 23:04:25 Ready to marshal response ...
2024/02/13 23:04:25 Ready to write response ...
2024/02/13 23:04:33 Ready to marshal response ...
2024/02/13 23:04:33 Ready to write response ...
2024/02/13 23:04:33 Ready to marshal response ...
2024/02/13 23:04:33 Ready to write response ...
2024/02/13 23:04:42 Ready to marshal response ...
2024/02/13 23:04:42 Ready to write response ...
2024/02/13 23:04:49 Ready to marshal response ...
2024/02/13 23:04:49 Ready to write response ...
2024/02/13 23:06:48 Ready to marshal response ...
2024/02/13 23:06:48 Ready to write response ...
==> kernel <==
23:06:58 up 1:49, 0 users, load average: 0.51, 1.10, 1.48
Linux addons-913502 5.15.0-1051-gcp #59~20.04.1-Ubuntu SMP Thu Jan 25 02:51:53 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.3 LTS"
==> kindnet [2c7ce000c4787cd59b99f0aa3712df814630b5df86dc8d4106851ef9fb5e528f] <==
I0213 23:04:56.917739 1 main.go:227] handling current node
I0213 23:05:06.921200 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:05:06.921225 1 main.go:227] handling current node
I0213 23:05:16.925410 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:05:16.925441 1 main.go:227] handling current node
I0213 23:05:26.935717 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:05:26.935748 1 main.go:227] handling current node
I0213 23:05:36.939996 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:05:36.940020 1 main.go:227] handling current node
I0213 23:05:46.951609 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:05:46.951633 1 main.go:227] handling current node
I0213 23:05:56.955861 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:05:56.955887 1 main.go:227] handling current node
I0213 23:06:06.967699 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:06:06.967723 1 main.go:227] handling current node
I0213 23:06:16.972263 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:06:16.972287 1 main.go:227] handling current node
I0213 23:06:26.983963 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:06:26.983997 1 main.go:227] handling current node
I0213 23:06:36.995583 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:06:36.995607 1 main.go:227] handling current node
I0213 23:06:46.999812 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:06:46.999836 1 main.go:227] handling current node
I0213 23:06:57.011722 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0213 23:06:57.011753 1 main.go:227] handling current node
==> kube-apiserver [8147d14fedd8c6540430e8abd97dd62cc925f2af68896a21c6414005e96c1b27] <==
I0213 23:04:28.010079 1 handler.go:232] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0213 23:04:29.019572 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0213 23:04:35.482440 1 controller.go:624] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0213 23:05:03.925911 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.925976 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.936257 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.936338 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.940593 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.940642 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.942699 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.942806 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.950006 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.950055 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.955819 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.955847 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.969504 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.969631 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0213 23:05:03.969848 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0213 23:05:03.969950 1 handler.go:232] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0213 23:05:04.943121 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0213 23:05:04.970261 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0213 23:05:04.980607 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0213 23:05:05.912376 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
I0213 23:06:48.319048 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.98.251.180"}
E0213 23:06:50.510426 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"ingress-nginx\" not found]"
==> kube-controller-manager [926999f79520bbbaa76881e1d82a66d6c9dbff9b40626be7aec26bba143ebef4] <==
E0213 23:05:39.757091 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0213 23:05:46.124281 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 23:05:46.124313 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0213 23:06:00.898382 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 23:06:00.898432 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0213 23:06:11.724140 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 23:06:11.724175 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0213 23:06:12.479072 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 23:06:12.479104 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0213 23:06:19.234743 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 23:06:19.234776 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0213 23:06:32.217693 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 23:06:32.217728 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0213 23:06:48.161400 1 event.go:307] "Event occurred" object="default/hello-world-app" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set hello-world-app-5d77478584 to 1"
I0213 23:06:48.172558 1 event.go:307] "Event occurred" object="default/hello-world-app-5d77478584" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: hello-world-app-5d77478584-q55k8"
I0213 23:06:48.178223 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="16.994356ms"
I0213 23:06:48.182364 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="4.086572ms"
I0213 23:06:48.182465 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="63.944µs"
I0213 23:06:48.182562 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="45.63µs"
I0213 23:06:48.188523 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="40.007µs"
I0213 23:06:49.940268 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="7.59241ms"
I0213 23:06:49.940376 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="71.828µs"
I0213 23:06:50.474371 1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-create"
I0213 23:06:50.475957 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-69cff4fd79" duration="9.596µs"
I0213 23:06:50.478765 1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-patch"
==> kube-proxy [a532fb0a6d4f40f6d4e96acc87a0c7a609cd1530b100bc6bdb2954f614d7ce5f] <==
I0213 23:02:26.873144 1 server_others.go:69] "Using iptables proxy"
I0213 23:02:27.263323 1 node.go:141] Successfully retrieved node IP: 192.168.49.2
I0213 23:02:27.971745 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0213 23:02:27.986949 1 server_others.go:152] "Using iptables Proxier"
I0213 23:02:27.987068 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I0213 23:02:27.987084 1 server_others.go:438] "Defaulting to no-op detect-local"
I0213 23:02:27.987139 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0213 23:02:27.987684 1 server.go:846] "Version info" version="v1.28.4"
I0213 23:02:27.987722 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0213 23:02:28.060854 1 config.go:188] "Starting service config controller"
I0213 23:02:28.061359 1 shared_informer.go:311] Waiting for caches to sync for service config
I0213 23:02:28.060885 1 config.go:97] "Starting endpoint slice config controller"
I0213 23:02:28.061810 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I0213 23:02:28.061051 1 config.go:315] "Starting node config controller"
I0213 23:02:28.063460 1 shared_informer.go:311] Waiting for caches to sync for node config
I0213 23:02:28.163116 1 shared_informer.go:318] Caches are synced for endpoint slice config
I0213 23:02:28.163195 1 shared_informer.go:318] Caches are synced for service config
I0213 23:02:28.163588 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [65dbb4b3290b81d79b9ed0f3ebb32d5c3931cea62c5ca1a83a3e5ced0af68aa6] <==
W0213 23:02:06.589630 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W0213 23:02:06.589802 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0213 23:02:06.589809 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0213 23:02:06.589824 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W0213 23:02:06.589769 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0213 23:02:06.589849 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
W0213 23:02:06.590431 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0213 23:02:06.590458 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0213 23:02:06.590437 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0213 23:02:06.590477 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0213 23:02:06.590809 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0213 23:02:06.590834 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0213 23:02:06.591277 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0213 23:02:06.591297 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0213 23:02:06.591314 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0213 23:02:06.591311 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0213 23:02:07.502607 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0213 23:02:07.502647 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W0213 23:02:07.502615 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0213 23:02:07.502670 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0213 23:02:07.543261 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0213 23:02:07.543288 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W0213 23:02:07.669380 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0213 23:02:07.669416 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
I0213 23:02:07.879015 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Feb 13 23:06:48 addons-913502 kubelet[1554]: I0213 23:06:48.288573 1554 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc7lb\" (UniqueName: \"kubernetes.io/projected/b284e710-34e5-467f-9e34-bc1965295747-kube-api-access-lc7lb\") pod \"hello-world-app-5d77478584-q55k8\" (UID: \"b284e710-34e5-467f-9e34-bc1965295747\") " pod="default/hello-world-app-5d77478584-q55k8"
Feb 13 23:06:48 addons-913502 kubelet[1554]: I0213 23:06:48.288667 1554 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/b284e710-34e5-467f-9e34-bc1965295747-gcp-creds\") pod \"hello-world-app-5d77478584-q55k8\" (UID: \"b284e710-34e5-467f-9e34-bc1965295747\") " pod="default/hello-world-app-5d77478584-q55k8"
Feb 13 23:06:48 addons-913502 kubelet[1554]: W0213 23:06:48.573658 1554 manager.go:1159] Failed to process watch event {EventType:0 Name:/docker/3a3c4bea7929182449776c05fa455c4211c81c7e833202acb79be3ab764f9ccb/crio-2dd7bb311ccf265e922e2314a30736bc7310d7af34b9382021487be5ad03a406 WatchSource:0}: Error finding container 2dd7bb311ccf265e922e2314a30736bc7310d7af34b9382021487be5ad03a406: Status 404 returned error can't find the container with id 2dd7bb311ccf265e922e2314a30736bc7310d7af34b9382021487be5ad03a406
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.497546 1554 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnjsb\" (UniqueName: \"kubernetes.io/projected/463f7dd2-ee46-408c-8790-4a7318e64279-kube-api-access-tnjsb\") pod \"463f7dd2-ee46-408c-8790-4a7318e64279\" (UID: \"463f7dd2-ee46-408c-8790-4a7318e64279\") "
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.499441 1554 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/463f7dd2-ee46-408c-8790-4a7318e64279-kube-api-access-tnjsb" (OuterVolumeSpecName: "kube-api-access-tnjsb") pod "463f7dd2-ee46-408c-8790-4a7318e64279" (UID: "463f7dd2-ee46-408c-8790-4a7318e64279"). InnerVolumeSpecName "kube-api-access-tnjsb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.598651 1554 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-tnjsb\" (UniqueName: \"kubernetes.io/projected/463f7dd2-ee46-408c-8790-4a7318e64279-kube-api-access-tnjsb\") on node \"addons-913502\" DevicePath \"\""
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.921226 1554 scope.go:117] "RemoveContainer" containerID="f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066"
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.939302 1554 scope.go:117] "RemoveContainer" containerID="f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066"
Feb 13 23:06:49 addons-913502 kubelet[1554]: E0213 23:06:49.939752 1554 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066\": container with ID starting with f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066 not found: ID does not exist" containerID="f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066"
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.939820 1554 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066"} err="failed to get container status \"f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066\": rpc error: code = NotFound desc = could not find container \"f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066\": container with ID starting with f462d4866b127b659d396a233d73064ccb1267ca980a3693e0617ab7cf2ed066 not found: ID does not exist"
Feb 13 23:06:49 addons-913502 kubelet[1554]: I0213 23:06:49.944244 1554 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="default/hello-world-app-5d77478584-q55k8" podStartSLOduration=1.192168864 podCreationTimestamp="2024-02-13 23:06:48 +0000 UTC" firstStartedPulling="2024-02-13 23:06:48.576542954 +0000 UTC m=+279.161483063" lastFinishedPulling="2024-02-13 23:06:49.328550653 +0000 UTC m=+279.913490771" observedRunningTime="2024-02-13 23:06:49.93340965 +0000 UTC m=+280.518349792" watchObservedRunningTime="2024-02-13 23:06:49.944176572 +0000 UTC m=+280.529116726"
Feb 13 23:06:51 addons-913502 kubelet[1554]: I0213 23:06:51.501668 1554 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="463f7dd2-ee46-408c-8790-4a7318e64279" path="/var/lib/kubelet/pods/463f7dd2-ee46-408c-8790-4a7318e64279/volumes"
Feb 13 23:06:51 addons-913502 kubelet[1554]: I0213 23:06:51.502028 1554 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="475e3344-264d-46da-9f01-658ac088fddb" path="/var/lib/kubelet/pods/475e3344-264d-46da-9f01-658ac088fddb/volumes"
Feb 13 23:06:51 addons-913502 kubelet[1554]: I0213 23:06:51.502342 1554 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="a6dde495-907f-4233-b3e9-f95c6bf230cc" path="/var/lib/kubelet/pods/a6dde495-907f-4233-b3e9-f95c6bf230cc/volumes"
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.825521 1554 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5185a108-4307-4b54-a0fe-4895307c6b78-webhook-cert\") pod \"5185a108-4307-4b54-a0fe-4895307c6b78\" (UID: \"5185a108-4307-4b54-a0fe-4895307c6b78\") "
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.825584 1554 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmnl5\" (UniqueName: \"kubernetes.io/projected/5185a108-4307-4b54-a0fe-4895307c6b78-kube-api-access-mmnl5\") pod \"5185a108-4307-4b54-a0fe-4895307c6b78\" (UID: \"5185a108-4307-4b54-a0fe-4895307c6b78\") "
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.827554 1554 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5185a108-4307-4b54-a0fe-4895307c6b78-kube-api-access-mmnl5" (OuterVolumeSpecName: "kube-api-access-mmnl5") pod "5185a108-4307-4b54-a0fe-4895307c6b78" (UID: "5185a108-4307-4b54-a0fe-4895307c6b78"). InnerVolumeSpecName "kube-api-access-mmnl5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.827712 1554 operation_generator.go:882] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5185a108-4307-4b54-a0fe-4895307c6b78-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "5185a108-4307-4b54-a0fe-4895307c6b78" (UID: "5185a108-4307-4b54-a0fe-4895307c6b78"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.926058 1554 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-mmnl5\" (UniqueName: \"kubernetes.io/projected/5185a108-4307-4b54-a0fe-4895307c6b78-kube-api-access-mmnl5\") on node \"addons-913502\" DevicePath \"\""
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.926099 1554 reconciler_common.go:300] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5185a108-4307-4b54-a0fe-4895307c6b78-webhook-cert\") on node \"addons-913502\" DevicePath \"\""
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.933831 1554 scope.go:117] "RemoveContainer" containerID="a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2"
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.950661 1554 scope.go:117] "RemoveContainer" containerID="a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2"
Feb 13 23:06:53 addons-913502 kubelet[1554]: E0213 23:06:53.951059 1554 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2\": container with ID starting with a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2 not found: ID does not exist" containerID="a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2"
Feb 13 23:06:53 addons-913502 kubelet[1554]: I0213 23:06:53.951111 1554 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2"} err="failed to get container status \"a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2\": rpc error: code = NotFound desc = could not find container \"a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2\": container with ID starting with a7dd59c495e2f1fac800152c02cb43564fa7e66c5b3f65d5ef1951cee9176dc2 not found: ID does not exist"
Feb 13 23:06:55 addons-913502 kubelet[1554]: I0213 23:06:55.501317 1554 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="5185a108-4307-4b54-a0fe-4895307c6b78" path="/var/lib/kubelet/pods/5185a108-4307-4b54-a0fe-4895307c6b78/volumes"
==> storage-provisioner [33a1dd0fd8243359d3bd9ca651a1228961de2f37c1f4b4568bb28a94b0c6b511] <==
I0213 23:02:58.173397 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0213 23:02:58.184949 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0213 23:02:58.185014 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0213 23:02:58.192648 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0213 23:02:58.192875 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-913502_b503f7ed-30f9-41fc-a132-85a9d16dcbdd!
I0213 23:02:58.193029 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"b73fd4ce-47e4-421a-9ef0-bac8cdb58009", APIVersion:"v1", ResourceVersion:"949", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-913502_b503f7ed-30f9-41fc-a132-85a9d16dcbdd became leader
I0213 23:02:58.293351 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-913502_b503f7ed-30f9-41fc-a132-85a9d16dcbdd!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-913502 -n addons-913502
helpers_test.go:261: (dbg) Run: kubectl --context addons-913502 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (154.73s)