=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run: kubectl --context addons-808918 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run: kubectl --context addons-808918 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run: kubectl --context addons-808918 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [bef3d5c7-f28f-481a-97de-a61ac3ad1d02] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [bef3d5c7-f28f-481a-97de-a61ac3ad1d02] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 10.002802845s
addons_test.go:264: (dbg) Run: out/minikube-linux-amd64 -p addons-808918 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:264: (dbg) Non-zero exit: out/minikube-linux-amd64 -p addons-808918 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m9.682811236s)
** stderr **
ssh: Process exited with status 28
** /stderr **
addons_test.go:280: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
addons_test.go:288: (dbg) Run: kubectl --context addons-808918 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run: out/minikube-linux-amd64 -p addons-808918 ip
addons_test.go:299: (dbg) Run: nslookup hello-john.test 192.168.49.2
addons_test.go:308: (dbg) Run: out/minikube-linux-amd64 -p addons-808918 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:308: (dbg) Done: out/minikube-linux-amd64 -p addons-808918 addons disable ingress-dns --alsologtostderr -v=1: (1.498583657s)
addons_test.go:313: (dbg) Run: out/minikube-linux-amd64 -p addons-808918 addons disable ingress --alsologtostderr -v=1
addons_test.go:313: (dbg) Done: out/minikube-linux-amd64 -p addons-808918 addons disable ingress --alsologtostderr -v=1: (7.597457543s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-808918
helpers_test.go:235: (dbg) docker inspect addons-808918:
-- stdout --
[
{
"Id": "e10b6832fb7ea74054546a845ed437afa46b452e741164e3783633cfb2ccb19d",
"Created": "2024-08-12T17:56:15.740914333Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 391284,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-08-12T17:56:15.844056063Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:d17bd43848dafc874617b40727c19e00972ff1d078819db667eabdbf87894954",
"ResolvConfPath": "/var/lib/docker/containers/e10b6832fb7ea74054546a845ed437afa46b452e741164e3783633cfb2ccb19d/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/e10b6832fb7ea74054546a845ed437afa46b452e741164e3783633cfb2ccb19d/hostname",
"HostsPath": "/var/lib/docker/containers/e10b6832fb7ea74054546a845ed437afa46b452e741164e3783633cfb2ccb19d/hosts",
"LogPath": "/var/lib/docker/containers/e10b6832fb7ea74054546a845ed437afa46b452e741164e3783633cfb2ccb19d/e10b6832fb7ea74054546a845ed437afa46b452e741164e3783633cfb2ccb19d-json.log",
"Name": "/addons-808918",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-808918:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-808918",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/256ed2ddafae8b48da33ac93714079057a643a0edee41e1b6640d785f16e4df1-init/diff:/var/lib/docker/overlay2/07e187351611f1dfccdd9a045e09bf27d51dfeac4d877947d31bb83c6dbf6e09/diff",
"MergedDir": "/var/lib/docker/overlay2/256ed2ddafae8b48da33ac93714079057a643a0edee41e1b6640d785f16e4df1/merged",
"UpperDir": "/var/lib/docker/overlay2/256ed2ddafae8b48da33ac93714079057a643a0edee41e1b6640d785f16e4df1/diff",
"WorkDir": "/var/lib/docker/overlay2/256ed2ddafae8b48da33ac93714079057a643a0edee41e1b6640d785f16e4df1/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-808918",
"Source": "/var/lib/docker/volumes/addons-808918/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-808918",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-808918",
"name.minikube.sigs.k8s.io": "addons-808918",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "703387f67e023c99d49d2a8df9c3ef0bcf760482feee75e36aa0c23aa789aa10",
"SandboxKey": "/var/run/docker/netns/703387f67e02",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33138"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33139"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33142"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33140"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33141"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-808918": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "7715664dca84518c8ac79036f8f7a42ea4aae4f36c6b489b548e9d17a720fb0c",
"EndpointID": "4289ac478c4723c3bddcd82e90af67c113640460e86d0223cf0220552ec47dbc",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-808918",
"e10b6832fb7e"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-808918 -n addons-808918
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-808918 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p addons-808918 logs -n 25: (1.073831455s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | --download-only -p | download-docker-578216 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | |
| | download-docker-578216 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p download-docker-578216 | download-docker-578216 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | 12 Aug 24 17:55 UTC |
| start | --download-only -p | binary-mirror-420801 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | |
| | binary-mirror-420801 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:32957 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p binary-mirror-420801 | binary-mirror-420801 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | 12 Aug 24 17:55 UTC |
| addons | enable dashboard -p | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | |
| | addons-808918 | | | | | |
| addons | disable dashboard -p | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | |
| | addons-808918 | | | | | |
| start | -p addons-808918 --wait=true | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:55 UTC | 12 Aug 24 17:58 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | gcp-auth --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ssh | addons-808918 ssh cat | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | /opt/local-path-provisioner/pvc-9a676841-9b09-42fc-bb69-c6ae0348fdbc_default_test-pvc/file1 | | | | | |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 18:00 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-808918 ip | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | -p addons-808918 | | | | | |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 17:59 UTC | 12 Aug 24 17:59 UTC |
| | addons-808918 | | | | | |
| addons | addons-808918 addons | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:00 UTC | 12 Aug 24 18:00 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-808918 addons | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:00 UTC | 12 Aug 24 18:00 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:00 UTC | 12 Aug 24 18:00 UTC |
| | -p addons-808918 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:00 UTC | 12 Aug 24 18:00 UTC |
| | addons-808918 | | | | | |
| ssh | addons-808918 ssh curl -s | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:00 UTC | |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:00 UTC | 12 Aug 24 18:00 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ip | addons-808918 ip | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:02 UTC | 12 Aug 24 18:02 UTC |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:02 UTC | 12 Aug 24 18:02 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-808918 addons disable | addons-808918 | jenkins | v1.33.1 | 12 Aug 24 18:02 UTC | 12 Aug 24 18:02 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/08/12 17:55:54
Running on machine: ubuntu-20-agent-15
Binary: Built with gc go1.22.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0812 17:55:54.372534 390563 out.go:291] Setting OutFile to fd 1 ...
I0812 17:55:54.372628 390563 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0812 17:55:54.372635 390563 out.go:304] Setting ErrFile to fd 2...
I0812 17:55:54.372639 390563 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0812 17:55:54.372805 390563 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19419-381930/.minikube/bin
I0812 17:55:54.373395 390563 out.go:298] Setting JSON to false
I0812 17:55:54.374296 390563 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-15","uptime":5901,"bootTime":1723479453,"procs":172,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1066-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0812 17:55:54.374356 390563 start.go:139] virtualization: kvm guest
I0812 17:55:54.376331 390563 out.go:177] * [addons-808918] minikube v1.33.1 on Ubuntu 20.04 (kvm/amd64)
I0812 17:55:54.377638 390563 out.go:177] - MINIKUBE_LOCATION=19419
I0812 17:55:54.377636 390563 notify.go:220] Checking for updates...
I0812 17:55:54.379891 390563 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0812 17:55:54.381021 390563 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19419-381930/kubeconfig
I0812 17:55:54.382253 390563 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19419-381930/.minikube
I0812 17:55:54.383483 390563 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0812 17:55:54.384601 390563 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0812 17:55:54.385917 390563 driver.go:392] Setting default libvirt URI to qemu:///system
I0812 17:55:54.407957 390563 docker.go:123] docker version: linux-27.1.1:Docker Engine - Community
I0812 17:55:54.408083 390563 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0812 17:55:54.453887 390563 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-08-12 17:55:54.445466324 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1066-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86
_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:27.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerEr
rors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0812 17:55:54.453996 390563 docker.go:307] overlay module found
I0812 17:55:54.455840 390563 out.go:177] * Using the docker driver based on user configuration
I0812 17:55:54.456987 390563 start.go:297] selected driver: docker
I0812 17:55:54.456998 390563 start.go:901] validating driver "docker" against <nil>
I0812 17:55:54.457009 390563 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0812 17:55:54.457782 390563 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0812 17:55:54.504753 390563 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-08-12 17:55:54.496193964 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1066-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86
_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-15 Labels:[] ExperimentalBuild:false ServerVersion:27.1.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerEr
rors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0812 17:55:54.504931 390563 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0812 17:55:54.505133 390563 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0812 17:55:54.506672 390563 out.go:177] * Using Docker driver with root privileges
I0812 17:55:54.508033 390563 cni.go:84] Creating CNI manager for ""
I0812 17:55:54.508049 390563 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0812 17:55:54.508057 390563 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I0812 17:55:54.508128 390563 start.go:340] cluster config:
{Name:addons-808918 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.3 ClusterName:addons-808918 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime
:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSH
AgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0812 17:55:54.509378 390563 out.go:177] * Starting "addons-808918" primary control-plane node in "addons-808918" cluster
I0812 17:55:54.510394 390563 cache.go:121] Beginning downloading kic base image for docker with crio
I0812 17:55:54.511553 390563 out.go:177] * Pulling base image v0.0.44-1723026928-19389 ...
I0812 17:55:54.512589 390563 preload.go:131] Checking if preload exists for k8s version v1.30.3 and runtime crio
I0812 17:55:54.512616 390563 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19419-381930/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.3-cri-o-overlay-amd64.tar.lz4
I0812 17:55:54.512624 390563 cache.go:56] Caching tarball of preloaded images
I0812 17:55:54.512681 390563 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 in local docker daemon
I0812 17:55:54.512695 390563 preload.go:172] Found /home/jenkins/minikube-integration/19419-381930/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.3-cri-o-overlay-amd64.tar.lz4 in cache, skipping download
I0812 17:55:54.512704 390563 cache.go:59] Finished verifying existence of preloaded tar for v1.30.3 on crio
I0812 17:55:54.513030 390563 profile.go:143] Saving config to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/config.json ...
I0812 17:55:54.513059 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/config.json: {Name:mkce0d62f88b3743c4b985bf5bdfe54d4f6ae0cf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:55:54.527176 390563 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 to local cache
I0812 17:55:54.527278 390563 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 in local cache directory
I0812 17:55:54.527292 390563 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 in local cache directory, skipping pull
I0812 17:55:54.527296 390563 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 exists in cache, skipping pull
I0812 17:55:54.527303 390563 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 as a tarball
I0812 17:55:54.527309 390563 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 from local cache
I0812 17:56:07.342245 390563 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 from cached tarball
I0812 17:56:07.342291 390563 cache.go:194] Successfully downloaded all kic artifacts
I0812 17:56:07.342338 390563 start.go:360] acquireMachinesLock for addons-808918: {Name:mkb5ffd1644d0c67f171599800e9e0e0a8758a2d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0812 17:56:07.342443 390563 start.go:364] duration metric: took 83.564µs to acquireMachinesLock for "addons-808918"
I0812 17:56:07.342467 390563 start.go:93] Provisioning new machine with config: &{Name:addons-808918 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.3 ClusterName:addons-808918 Namespace:default APIServerHAVIP: APIServerName:min
ikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQe
muFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.30.3 ContainerRuntime:crio ControlPlane:true Worker:true}
I0812 17:56:07.342539 390563 start.go:125] createHost starting for "" (driver="docker")
I0812 17:56:07.425436 390563 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0812 17:56:07.425706 390563 start.go:159] libmachine.API.Create for "addons-808918" (driver="docker")
I0812 17:56:07.425739 390563 client.go:168] LocalClient.Create starting
I0812 17:56:07.425866 390563 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca.pem
I0812 17:56:07.510645 390563 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/cert.pem
I0812 17:56:07.697075 390563 cli_runner.go:164] Run: docker network inspect addons-808918 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0812 17:56:07.712828 390563 cli_runner.go:211] docker network inspect addons-808918 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0812 17:56:07.712937 390563 network_create.go:284] running [docker network inspect addons-808918] to gather additional debugging logs...
I0812 17:56:07.712966 390563 cli_runner.go:164] Run: docker network inspect addons-808918
W0812 17:56:07.727660 390563 cli_runner.go:211] docker network inspect addons-808918 returned with exit code 1
I0812 17:56:07.727694 390563 network_create.go:287] error running [docker network inspect addons-808918]: docker network inspect addons-808918: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-808918 not found
I0812 17:56:07.727716 390563 network_create.go:289] output of [docker network inspect addons-808918]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-808918 not found
** /stderr **
I0812 17:56:07.727804 390563 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0812 17:56:07.744002 390563 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001ac47b0}
I0812 17:56:07.744078 390563 network_create.go:124] attempt to create docker network addons-808918 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0812 17:56:07.744133 390563 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-808918 addons-808918
I0812 17:56:08.001667 390563 network_create.go:108] docker network addons-808918 192.168.49.0/24 created
I0812 17:56:08.001700 390563 kic.go:121] calculated static IP "192.168.49.2" for the "addons-808918" container
I0812 17:56:08.001774 390563 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0812 17:56:08.016522 390563 cli_runner.go:164] Run: docker volume create addons-808918 --label name.minikube.sigs.k8s.io=addons-808918 --label created_by.minikube.sigs.k8s.io=true
I0812 17:56:08.114383 390563 oci.go:103] Successfully created a docker volume addons-808918
I0812 17:56:08.114484 390563 cli_runner.go:164] Run: docker run --rm --name addons-808918-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-808918 --entrypoint /usr/bin/test -v addons-808918:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 -d /var/lib
I0812 17:56:11.018049 390563 cli_runner.go:217] Completed: docker run --rm --name addons-808918-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-808918 --entrypoint /usr/bin/test -v addons-808918:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 -d /var/lib: (2.903505036s)
I0812 17:56:11.018080 390563 oci.go:107] Successfully prepared a docker volume addons-808918
I0812 17:56:11.018097 390563 preload.go:131] Checking if preload exists for k8s version v1.30.3 and runtime crio
I0812 17:56:11.018125 390563 kic.go:194] Starting extracting preloaded images to volume ...
I0812 17:56:11.018183 390563 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19419-381930/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-808918:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 -I lz4 -xf /preloaded.tar -C /extractDir
I0812 17:56:15.681972 390563 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19419-381930/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.3-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-808918:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 -I lz4 -xf /preloaded.tar -C /extractDir: (4.663739271s)
I0812 17:56:15.682016 390563 kic.go:203] duration metric: took 4.663878076s to extract preloaded images to volume ...
W0812 17:56:15.682152 390563 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0812 17:56:15.682254 390563 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0812 17:56:15.726538 390563 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-808918 --name addons-808918 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-808918 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-808918 --network addons-808918 --ip 192.168.49.2 --volume addons-808918:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0
I0812 17:56:16.015921 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Running}}
I0812 17:56:16.033458 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:16.053348 390563 cli_runner.go:164] Run: docker exec addons-808918 stat /var/lib/dpkg/alternatives/iptables
I0812 17:56:16.094352 390563 oci.go:144] the created container "addons-808918" has a running status.
I0812 17:56:16.094385 390563 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa...
I0812 17:56:16.276168 390563 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0812 17:56:16.301521 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:16.319647 390563 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0812 17:56:16.319671 390563 kic_runner.go:114] Args: [docker exec --privileged addons-808918 chown docker:docker /home/docker/.ssh/authorized_keys]
I0812 17:56:16.371961 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:16.391863 390563 machine.go:94] provisionDockerMachine start ...
I0812 17:56:16.391956 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:16.408780 390563 main.go:141] libmachine: Using SSH client type: native
I0812 17:56:16.408968 390563 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82dc60] 0x8309c0 <nil> [] 0s} 127.0.0.1 33138 <nil> <nil>}
I0812 17:56:16.408980 390563 main.go:141] libmachine: About to run SSH command:
hostname
I0812 17:56:16.610080 390563 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-808918
I0812 17:56:16.610109 390563 ubuntu.go:169] provisioning hostname "addons-808918"
I0812 17:56:16.610175 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:16.632065 390563 main.go:141] libmachine: Using SSH client type: native
I0812 17:56:16.632298 390563 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82dc60] 0x8309c0 <nil> [] 0s} 127.0.0.1 33138 <nil> <nil>}
I0812 17:56:16.632319 390563 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-808918 && echo "addons-808918" | sudo tee /etc/hostname
I0812 17:56:16.760558 390563 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-808918
I0812 17:56:16.760642 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:16.777765 390563 main.go:141] libmachine: Using SSH client type: native
I0812 17:56:16.777982 390563 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82dc60] 0x8309c0 <nil> [] 0s} 127.0.0.1 33138 <nil> <nil>}
I0812 17:56:16.778023 390563 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-808918' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-808918/g' /etc/hosts;
else
echo '127.0.1.1 addons-808918' | sudo tee -a /etc/hosts;
fi
fi
I0812 17:56:16.893987 390563 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0812 17:56:16.894054 390563 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19419-381930/.minikube CaCertPath:/home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19419-381930/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19419-381930/.minikube}
I0812 17:56:16.894088 390563 ubuntu.go:177] setting up certificates
I0812 17:56:16.894101 390563 provision.go:84] configureAuth start
I0812 17:56:16.894157 390563 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-808918
I0812 17:56:16.910411 390563 provision.go:143] copyHostCerts
I0812 17:56:16.910487 390563 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19419-381930/.minikube/key.pem (1679 bytes)
I0812 17:56:16.910612 390563 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19419-381930/.minikube/ca.pem (1078 bytes)
I0812 17:56:16.910681 390563 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19419-381930/.minikube/cert.pem (1123 bytes)
I0812 17:56:16.910742 390563 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19419-381930/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca-key.pem org=jenkins.addons-808918 san=[127.0.0.1 192.168.49.2 addons-808918 localhost minikube]
I0812 17:56:17.013322 390563 provision.go:177] copyRemoteCerts
I0812 17:56:17.013397 390563 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0812 17:56:17.013435 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:17.029449 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:17.114216 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0812 17:56:17.135140 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0812 17:56:17.156012 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0812 17:56:17.176576 390563 provision.go:87] duration metric: took 282.461429ms to configureAuth
I0812 17:56:17.176604 390563 ubuntu.go:193] setting minikube options for container-runtime
I0812 17:56:17.176761 390563 config.go:182] Loaded profile config "addons-808918": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.30.3
I0812 17:56:17.176859 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:17.193216 390563 main.go:141] libmachine: Using SSH client type: native
I0812 17:56:17.193427 390563 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82dc60] 0x8309c0 <nil> [] 0s} 127.0.0.1 33138 <nil> <nil>}
I0812 17:56:17.193450 390563 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %!s(MISSING) "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
I0812 17:56:17.393782 390563 main.go:141] libmachine: SSH cmd err, output: <nil>:
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
I0812 17:56:17.393809 390563 machine.go:97] duration metric: took 1.001920745s to provisionDockerMachine
I0812 17:56:17.393822 390563 client.go:171] duration metric: took 9.968075622s to LocalClient.Create
I0812 17:56:17.393842 390563 start.go:167] duration metric: took 9.968138775s to libmachine.API.Create "addons-808918"
I0812 17:56:17.393853 390563 start.go:293] postStartSetup for "addons-808918" (driver="docker")
I0812 17:56:17.393865 390563 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0812 17:56:17.393922 390563 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0812 17:56:17.393963 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:17.411111 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:17.502581 390563 ssh_runner.go:195] Run: cat /etc/os-release
I0812 17:56:17.505493 390563 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0812 17:56:17.505519 390563 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0812 17:56:17.505527 390563 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0812 17:56:17.505533 390563 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0812 17:56:17.505543 390563 filesync.go:126] Scanning /home/jenkins/minikube-integration/19419-381930/.minikube/addons for local assets ...
I0812 17:56:17.505589 390563 filesync.go:126] Scanning /home/jenkins/minikube-integration/19419-381930/.minikube/files for local assets ...
I0812 17:56:17.505610 390563 start.go:296] duration metric: took 111.75152ms for postStartSetup
I0812 17:56:17.505874 390563 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-808918
I0812 17:56:17.522249 390563 profile.go:143] Saving config to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/config.json ...
I0812 17:56:17.522480 390563 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0812 17:56:17.522524 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:17.538614 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:17.626620 390563 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0812 17:56:17.630715 390563 start.go:128] duration metric: took 10.28816258s to createHost
I0812 17:56:17.630739 390563 start.go:83] releasing machines lock for "addons-808918", held for 10.288284956s
I0812 17:56:17.630819 390563 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-808918
I0812 17:56:17.647357 390563 ssh_runner.go:195] Run: cat /version.json
I0812 17:56:17.647401 390563 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0812 17:56:17.647472 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:17.647403 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:17.664797 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:17.664891 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:17.826521 390563 ssh_runner.go:195] Run: systemctl --version
I0812 17:56:17.830569 390563 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
I0812 17:56:17.967458 390563 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0812 17:56:17.971790 390563 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0812 17:56:17.989096 390563 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
I0812 17:56:17.989171 390563 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0812 17:56:18.014123 390563 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0812 17:56:18.014147 390563 start.go:495] detecting cgroup driver to use...
I0812 17:56:18.014187 390563 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0812 17:56:18.014241 390563 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0812 17:56:18.027916 390563 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0812 17:56:18.037424 390563 docker.go:217] disabling cri-docker service (if available) ...
I0812 17:56:18.037465 390563 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0812 17:56:18.049432 390563 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0812 17:56:18.061783 390563 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0812 17:56:18.134707 390563 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0812 17:56:18.210374 390563 docker.go:233] disabling docker service ...
I0812 17:56:18.210423 390563 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0812 17:56:18.227727 390563 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0812 17:56:18.237653 390563 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0812 17:56:18.309606 390563 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0812 17:56:18.386717 390563 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0812 17:56:18.396784 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I0812 17:56:18.410599 390563 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.9" pause image...
I0812 17:56:18.410663 390563 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.9"|' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.419312 390563 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
I0812 17:56:18.419366 390563 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.427778 390563 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.435929 390563 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.444152 390563 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0812 17:56:18.452059 390563 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *"net.ipv4.ip_unprivileged_port_start=.*"/d' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.460474 390563 ssh_runner.go:195] Run: sh -c "sudo grep -q "^ *default_sysctls" /etc/crio/crio.conf.d/02-crio.conf || sudo sed -i '/conmon_cgroup = .*/a default_sysctls = \[\n\]' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.474176 390563 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^default_sysctls *= *\[|&\n "net.ipv4.ip_unprivileged_port_start=0",|' /etc/crio/crio.conf.d/02-crio.conf"
I0812 17:56:18.483012 390563 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0812 17:56:18.490218 390563 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0812 17:56:18.497637 390563 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0812 17:56:18.568494 390563 ssh_runner.go:195] Run: sudo systemctl restart crio
I0812 17:56:18.675528 390563 start.go:542] Will wait 60s for socket path /var/run/crio/crio.sock
I0812 17:56:18.675595 390563 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
I0812 17:56:18.678772 390563 start.go:563] Will wait 60s for crictl version
I0812 17:56:18.678824 390563 ssh_runner.go:195] Run: which crictl
I0812 17:56:18.681851 390563 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0812 17:56:18.713273 390563 start.go:579] Version: 0.1.0
RuntimeName: cri-o
RuntimeVersion: 1.24.6
RuntimeApiVersion: v1
I0812 17:56:18.713366 390563 ssh_runner.go:195] Run: crio --version
I0812 17:56:18.746739 390563 ssh_runner.go:195] Run: crio --version
I0812 17:56:18.781842 390563 out.go:177] * Preparing Kubernetes v1.30.3 on CRI-O 1.24.6 ...
I0812 17:56:18.783082 390563 cli_runner.go:164] Run: docker network inspect addons-808918 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0812 17:56:18.799168 390563 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0812 17:56:18.802640 390563 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0812 17:56:18.812460 390563 kubeadm.go:883] updating cluster {Name:addons-808918 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.3 ClusterName:addons-808918 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmw
arePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0812 17:56:18.812595 390563 preload.go:131] Checking if preload exists for k8s version v1.30.3 and runtime crio
I0812 17:56:18.812646 390563 ssh_runner.go:195] Run: sudo crictl images --output json
I0812 17:56:18.876512 390563 crio.go:514] all images are preloaded for cri-o runtime.
I0812 17:56:18.876536 390563 crio.go:433] Images already preloaded, skipping extraction
I0812 17:56:18.876594 390563 ssh_runner.go:195] Run: sudo crictl images --output json
I0812 17:56:18.907150 390563 crio.go:514] all images are preloaded for cri-o runtime.
I0812 17:56:18.907174 390563 cache_images.go:84] Images are preloaded, skipping loading
I0812 17:56:18.907182 390563 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.30.3 crio true true} ...
I0812 17:56:18.907280 390563 kubeadm.go:946] kubelet [Unit]
Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.30.3/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --enforce-node-allocatable= --hostname-override=addons-808918 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.30.3 ClusterName:addons-808918 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0812 17:56:18.907352 390563 ssh_runner.go:195] Run: crio config
I0812 17:56:18.947463 390563 cni.go:84] Creating CNI manager for ""
I0812 17:56:18.947488 390563 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0812 17:56:18.947505 390563 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0812 17:56:18.947535 390563 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.30.3 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-808918 NodeName:addons-808918 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuberne
tes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/crio/crio.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0812 17:56:18.947711 390563 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/crio/crio.sock
name: "addons-808918"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.30.3
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0812 17:56:18.947781 390563 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.30.3
I0812 17:56:18.955866 390563 binaries.go:44] Found k8s binaries, skipping transfer
I0812 17:56:18.955932 390563 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0812 17:56:18.963307 390563 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (363 bytes)
I0812 17:56:18.978525 390563 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0812 17:56:18.993827 390563 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2151 bytes)
I0812 17:56:19.009653 390563 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0812 17:56:19.012540 390563 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0812 17:56:19.021691 390563 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0812 17:56:19.093370 390563 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0812 17:56:19.105790 390563 certs.go:68] Setting up /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918 for IP: 192.168.49.2
I0812 17:56:19.105823 390563 certs.go:194] generating shared ca certs ...
I0812 17:56:19.105849 390563 certs.go:226] acquiring lock for ca certs: {Name:mkfcc3edb334bd6ba47edb4b84a31a4a26ab9e3b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.105988 390563 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19419-381930/.minikube/ca.key
I0812 17:56:19.377820 390563 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19419-381930/.minikube/ca.crt ...
I0812 17:56:19.377849 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/ca.crt: {Name:mkeda05fa0babcd952c1f2e6e3144f0ab76fa4b6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.378016 390563 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19419-381930/.minikube/ca.key ...
I0812 17:56:19.378026 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/ca.key: {Name:mk2a258b121dcb15f07f63ee449a96b49b9248c5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.378101 390563 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.key
I0812 17:56:19.748556 390563 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.crt ...
I0812 17:56:19.748587 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.crt: {Name:mk1bbe9a3c3bb2bc8f12d482a6247d9e5a47abe7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.748764 390563 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.key ...
I0812 17:56:19.748775 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.key: {Name:mk0049890c048abf67028d1ac867861b202c2026 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.748849 390563 certs.go:256] generating profile certs ...
I0812 17:56:19.748909 390563 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/client.key
I0812 17:56:19.748922 390563 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/client.crt with IP's: []
I0812 17:56:19.820828 390563 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/client.crt ...
I0812 17:56:19.820857 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/client.crt: {Name:mk29f232b7a1267f4fde315773f21ca6a3196f3e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.821012 390563 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/client.key ...
I0812 17:56:19.821022 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/client.key: {Name:mke354a2d2b366afa78b3f676e674282de431a95 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:19.821093 390563 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.key.cd07b36f
I0812 17:56:19.821112 390563 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.crt.cd07b36f with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0812 17:56:20.149861 390563 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.crt.cd07b36f ...
I0812 17:56:20.149895 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.crt.cd07b36f: {Name:mkca23c14b0f92a18db50588b2a218264d50f2af Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:20.150074 390563 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.key.cd07b36f ...
I0812 17:56:20.150090 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.key.cd07b36f: {Name:mk581a81fa7a19b1903219aac636b8c78ecd9ec2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:20.150163 390563 certs.go:381] copying /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.crt.cd07b36f -> /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.crt
I0812 17:56:20.150261 390563 certs.go:385] copying /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.key.cd07b36f -> /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.key
I0812 17:56:20.150313 390563 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.key
I0812 17:56:20.150332 390563 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.crt with IP's: []
I0812 17:56:20.270452 390563 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.crt ...
I0812 17:56:20.270483 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.crt: {Name:mkfb0b5b713bcf4163196fcee996d1fc896c387a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:20.270646 390563 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.key ...
I0812 17:56:20.270659 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.key: {Name:mk45b96dbe2be664415d2b833eedbd657d903989 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:20.270839 390563 certs.go:484] found cert: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca-key.pem (1675 bytes)
I0812 17:56:20.270878 390563 certs.go:484] found cert: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/ca.pem (1078 bytes)
I0812 17:56:20.270904 390563 certs.go:484] found cert: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/cert.pem (1123 bytes)
I0812 17:56:20.270929 390563 certs.go:484] found cert: /home/jenkins/minikube-integration/19419-381930/.minikube/certs/key.pem (1679 bytes)
I0812 17:56:20.271516 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0812 17:56:20.293105 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0812 17:56:20.313936 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0812 17:56:20.334140 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0812 17:56:20.354920 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0812 17:56:20.375496 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0812 17:56:20.395957 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0812 17:56:20.416354 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/profiles/addons-808918/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0812 17:56:20.436971 390563 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19419-381930/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0812 17:56:20.457754 390563 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0812 17:56:20.472934 390563 ssh_runner.go:195] Run: openssl version
I0812 17:56:20.477813 390563 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0812 17:56:20.486073 390563 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0812 17:56:20.489032 390563 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 12 17:56 /usr/share/ca-certificates/minikubeCA.pem
I0812 17:56:20.489087 390563 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0812 17:56:20.495210 390563 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0812 17:56:20.503428 390563 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0812 17:56:20.506239 390563 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0812 17:56:20.506300 390563 kubeadm.go:392] StartCluster: {Name:addons-808918 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723026928-19389@sha256:7715fe0c5dce35b4eb757765cbbe02d40cd8b5effa0639735e42ad89f4f51ef0 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.3 ClusterName:addons-808918 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.3 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmware
Path: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0812 17:56:20.506385 390563 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
I0812 17:56:20.506441 390563 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0812 17:56:20.538350 390563 cri.go:89] found id: ""
I0812 17:56:20.538424 390563 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0812 17:56:20.546466 390563 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0812 17:56:20.554247 390563 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0812 17:56:20.554300 390563 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0812 17:56:20.561934 390563 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0812 17:56:20.561974 390563 kubeadm.go:157] found existing configuration files:
I0812 17:56:20.562063 390563 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0812 17:56:20.570133 390563 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0812 17:56:20.570187 390563 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0812 17:56:20.577252 390563 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0812 17:56:20.584528 390563 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0812 17:56:20.584580 390563 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0812 17:56:20.591522 390563 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0812 17:56:20.598844 390563 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0812 17:56:20.598891 390563 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0812 17:56:20.605968 390563 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0812 17:56:20.613268 390563 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0812 17:56:20.613309 390563 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0812 17:56:20.620346 390563 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.30.3:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0812 17:56:20.703022 390563 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1066-gcp\n", err: exit status 1
I0812 17:56:20.755299 390563 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0812 17:56:30.527615 390563 kubeadm.go:310] [init] Using Kubernetes version: v1.30.3
I0812 17:56:30.527666 390563 kubeadm.go:310] [preflight] Running pre-flight checks
I0812 17:56:30.527784 390563 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0812 17:56:30.527885 390563 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1066-gcp
I0812 17:56:30.527935 390563 kubeadm.go:310] OS: Linux
I0812 17:56:30.528000 390563 kubeadm.go:310] CGROUPS_CPU: enabled
I0812 17:56:30.528064 390563 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0812 17:56:30.528132 390563 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0812 17:56:30.528195 390563 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0812 17:56:30.528264 390563 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0812 17:56:30.528328 390563 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0812 17:56:30.528399 390563 kubeadm.go:310] CGROUPS_PIDS: enabled
I0812 17:56:30.528470 390563 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0812 17:56:30.528543 390563 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0812 17:56:30.528640 390563 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0812 17:56:30.528771 390563 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0812 17:56:30.528907 390563 kubeadm.go:310] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0812 17:56:30.529016 390563 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0812 17:56:30.530714 390563 out.go:204] - Generating certificates and keys ...
I0812 17:56:30.530784 390563 kubeadm.go:310] [certs] Using existing ca certificate authority
I0812 17:56:30.530837 390563 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0812 17:56:30.530897 390563 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0812 17:56:30.530949 390563 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0812 17:56:30.531002 390563 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0812 17:56:30.531081 390563 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0812 17:56:30.531172 390563 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0812 17:56:30.531329 390563 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-808918 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0812 17:56:30.531414 390563 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0812 17:56:30.531553 390563 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-808918 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0812 17:56:30.531629 390563 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0812 17:56:30.531711 390563 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0812 17:56:30.531779 390563 kubeadm.go:310] [certs] Generating "sa" key and public key
I0812 17:56:30.531869 390563 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0812 17:56:30.531918 390563 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0812 17:56:30.531970 390563 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0812 17:56:30.532021 390563 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0812 17:56:30.532076 390563 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0812 17:56:30.532137 390563 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0812 17:56:30.532203 390563 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0812 17:56:30.532262 390563 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0812 17:56:30.533558 390563 out.go:204] - Booting up control plane ...
I0812 17:56:30.533623 390563 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0812 17:56:30.533695 390563 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0812 17:56:30.533774 390563 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0812 17:56:30.533877 390563 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0812 17:56:30.533949 390563 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0812 17:56:30.533983 390563 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0812 17:56:30.534115 390563 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0812 17:56:30.534186 390563 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
I0812 17:56:30.534235 390563 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.58874ms
I0812 17:56:30.534298 390563 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0812 17:56:30.534346 390563 kubeadm.go:310] [api-check] The API server is healthy after 4.501957825s
I0812 17:56:30.534433 390563 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0812 17:56:30.534575 390563 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0812 17:56:30.534625 390563 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0812 17:56:30.534779 390563 kubeadm.go:310] [mark-control-plane] Marking the node addons-808918 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0812 17:56:30.534826 390563 kubeadm.go:310] [bootstrap-token] Using token: c2l3yx.cmknwj5t5rosm03q
I0812 17:56:30.536216 390563 out.go:204] - Configuring RBAC rules ...
I0812 17:56:30.536310 390563 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0812 17:56:30.536380 390563 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0812 17:56:30.536525 390563 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0812 17:56:30.536684 390563 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0812 17:56:30.536803 390563 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0812 17:56:30.536926 390563 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0812 17:56:30.537047 390563 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0812 17:56:30.537115 390563 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0812 17:56:30.537173 390563 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0812 17:56:30.537182 390563 kubeadm.go:310]
I0812 17:56:30.537258 390563 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0812 17:56:30.537271 390563 kubeadm.go:310]
I0812 17:56:30.537385 390563 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0812 17:56:30.537394 390563 kubeadm.go:310]
I0812 17:56:30.537429 390563 kubeadm.go:310] mkdir -p $HOME/.kube
I0812 17:56:30.537505 390563 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0812 17:56:30.537576 390563 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0812 17:56:30.537584 390563 kubeadm.go:310]
I0812 17:56:30.537652 390563 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0812 17:56:30.537665 390563 kubeadm.go:310]
I0812 17:56:30.537734 390563 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0812 17:56:30.537745 390563 kubeadm.go:310]
I0812 17:56:30.537827 390563 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0812 17:56:30.537891 390563 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0812 17:56:30.537957 390563 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0812 17:56:30.537966 390563 kubeadm.go:310]
I0812 17:56:30.538079 390563 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0812 17:56:30.538145 390563 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0812 17:56:30.538151 390563 kubeadm.go:310]
I0812 17:56:30.538229 390563 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token c2l3yx.cmknwj5t5rosm03q \
I0812 17:56:30.538379 390563 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:863162be5a9ca86adb2b4a18d5f9c3a0664e7b9490b407e2f2c24b387ddd2d27 \
I0812 17:56:30.538399 390563 kubeadm.go:310] --control-plane
I0812 17:56:30.538405 390563 kubeadm.go:310]
I0812 17:56:30.538477 390563 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0812 17:56:30.538482 390563 kubeadm.go:310]
I0812 17:56:30.538554 390563 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token c2l3yx.cmknwj5t5rosm03q \
I0812 17:56:30.538661 390563 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:863162be5a9ca86adb2b4a18d5f9c3a0664e7b9490b407e2f2c24b387ddd2d27
I0812 17:56:30.538675 390563 cni.go:84] Creating CNI manager for ""
I0812 17:56:30.538683 390563 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0812 17:56:30.540141 390563 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0812 17:56:30.541500 390563 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0812 17:56:30.545301 390563 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.30.3/kubectl ...
I0812 17:56:30.545321 390563 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
I0812 17:56:30.561561 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0812 17:56:30.742826 390563 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0812 17:56:30.742915 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-808918 minikube.k8s.io/updated_at=2024_08_12T17_56_30_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=cef58b3fe6f92de527946427fe0cca1d2a7a15cc minikube.k8s.io/name=addons-808918 minikube.k8s.io/primary=true
I0812 17:56:30.742915 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:30.864108 390563 ops.go:34] apiserver oom_adj: -16
I0812 17:56:30.864224 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:31.364755 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:31.864736 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:32.364322 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:32.865118 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:33.364877 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:33.865206 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:34.364786 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:34.864915 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:35.365101 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:35.865123 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:36.365315 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:36.865212 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:37.364901 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:37.865029 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:38.364924 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:38.864447 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:39.365255 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:39.864395 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:40.364946 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:40.864297 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:41.365076 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:41.864712 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:42.365224 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:42.865192 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:43.365197 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:43.864351 390563 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.3/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0812 17:56:43.928842 390563 kubeadm.go:1113] duration metric: took 13.186003823s to wait for elevateKubeSystemPrivileges
I0812 17:56:43.928889 390563 kubeadm.go:394] duration metric: took 23.422595653s to StartCluster
I0812 17:56:43.928929 390563 settings.go:142] acquiring lock: {Name:mke27b0d1485c5ddce671ff21d1ff05eaade07c9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:43.929071 390563 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19419-381930/kubeconfig
I0812 17:56:43.929473 390563 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19419-381930/kubeconfig: {Name:mkf6da54cf5e65ae61df58077fa331943fa67f5c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0812 17:56:43.929691 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0812 17:56:43.929708 390563 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.3 ContainerRuntime:crio ControlPlane:true Worker:true}
I0812 17:56:43.929796 390563 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0812 17:56:43.929937 390563 addons.go:69] Setting yakd=true in profile "addons-808918"
I0812 17:56:43.929935 390563 addons.go:69] Setting gcp-auth=true in profile "addons-808918"
I0812 17:56:43.929954 390563 config.go:182] Loaded profile config "addons-808918": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.30.3
I0812 17:56:43.929953 390563 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-808918"
I0812 17:56:43.929969 390563 addons.go:69] Setting cloud-spanner=true in profile "addons-808918"
I0812 17:56:43.929986 390563 addons.go:234] Setting addon yakd=true in "addons-808918"
I0812 17:56:43.929994 390563 mustload.go:65] Loading cluster: addons-808918
I0812 17:56:43.929996 390563 addons.go:69] Setting default-storageclass=true in profile "addons-808918"
I0812 17:56:43.930020 390563 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-808918"
I0812 17:56:43.930028 390563 addons.go:69] Setting ingress-dns=true in profile "addons-808918"
I0812 17:56:43.930049 390563 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-808918"
I0812 17:56:43.930058 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930060 390563 addons.go:69] Setting ingress=true in profile "addons-808918"
I0812 17:56:43.930064 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930079 390563 addons.go:234] Setting addon ingress=true in "addons-808918"
I0812 17:56:43.930119 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930050 390563 addons.go:234] Setting addon ingress-dns=true in "addons-808918"
I0812 17:56:43.930179 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930212 390563 config.go:182] Loaded profile config "addons-808918": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.30.3
I0812 17:56:43.930414 390563 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-808918"
I0812 17:56:43.930443 390563 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-808918"
I0812 17:56:43.930460 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.930489 390563 addons.go:69] Setting metrics-server=true in profile "addons-808918"
I0812 17:56:43.930515 390563 addons.go:234] Setting addon metrics-server=true in "addons-808918"
I0812 17:56:43.930538 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930617 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.930629 390563 addons.go:69] Setting registry=true in profile "addons-808918"
I0812 17:56:43.930657 390563 addons.go:234] Setting addon registry=true in "addons-808918"
I0812 17:56:43.930648 390563 addons.go:69] Setting volcano=true in profile "addons-808918"
I0812 17:56:43.930681 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930707 390563 addons.go:234] Setting addon volcano=true in "addons-808918"
I0812 17:56:43.930715 390563 addons.go:69] Setting storage-provisioner=true in profile "addons-808918"
I0812 17:56:43.930739 390563 addons.go:234] Setting addon storage-provisioner=true in "addons-808918"
I0812 17:56:43.930743 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930761 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930837 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.930930 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.930021 390563 addons.go:234] Setting addon cloud-spanner=true in "addons-808918"
I0812 17:56:43.931153 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.931161 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.931181 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.931192 390563 addons.go:69] Setting volumesnapshots=true in profile "addons-808918"
I0812 17:56:43.931217 390563 addons.go:234] Setting addon volumesnapshots=true in "addons-808918"
I0812 17:56:43.931243 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.931259 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.931589 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.931652 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.932167 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.933657 390563 addons.go:69] Setting inspektor-gadget=true in profile "addons-808918"
I0812 17:56:43.933699 390563 addons.go:234] Setting addon inspektor-gadget=true in "addons-808918"
I0812 17:56:43.933733 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.934225 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.937484 390563 out.go:177] * Verifying Kubernetes components...
I0812 17:56:43.930055 390563 addons.go:69] Setting helm-tiller=true in profile "addons-808918"
I0812 17:56:43.937623 390563 addons.go:234] Setting addon helm-tiller=true in "addons-808918"
I0812 17:56:43.930659 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.938544 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.930617 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.929952 390563 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-808918"
I0812 17:56:43.944926 390563 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0812 17:56:43.944943 390563 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-808918"
I0812 17:56:43.944995 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.945382 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.945493 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.930708 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:43.974477 390563 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0812 17:56:43.974582 390563 out.go:177] - Using image docker.io/registry:2.8.3
I0812 17:56:43.976251 390563 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0812 17:56:43.976276 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0812 17:56:43.976343 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:43.980258 390563 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0812 17:56:43.982203 390563 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0812 17:56:43.983145 390563 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0812 17:56:43.983170 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0812 17:56:43.983253 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:43.984242 390563 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0812 17:56:43.984258 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0812 17:56:43.984315 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:43.988945 390563 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0812 17:56:43.989118 390563 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.22
I0812 17:56:43.990326 390563 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0812 17:56:43.990392 390563 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0812 17:56:43.990531 390563 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0812 17:56:43.990612 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:43.990971 390563 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0812 17:56:43.990988 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0812 17:56:43.991037 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:43.993513 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:43.994039 390563 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0812 17:56:43.998844 390563 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.1
I0812 17:56:44.001932 390563 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0812 17:56:44.001961 390563 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0812 17:56:44.002039 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.002244 390563 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
I0812 17:56:44.003500 390563 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.1
I0812 17:56:44.003665 390563 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0812 17:56:44.003678 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0812 17:56:44.003726 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.005983 390563 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0812 17:56:44.011893 390563 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0812 17:56:44.011913 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0812 17:56:44.011972 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.012415 390563 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0812 17:56:44.012452 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0812 17:56:44.012522 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.021811 390563 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0812 17:56:44.023877 390563 addons.go:234] Setting addon default-storageclass=true in "addons-808918"
I0812 17:56:44.023922 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:44.024394 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:44.026680 390563 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0812 17:56:44.028117 390563 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0812 17:56:44.029267 390563 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0812 17:56:44.030697 390563 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0812 17:56:44.030979 390563 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-808918"
I0812 17:56:44.031026 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:44.031500 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:44.032876 390563 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0812 17:56:44.033004 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.035116 390563 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0812 17:56:44.035119 390563 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0812 17:56:44.036338 390563 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0812 17:56:44.036368 390563 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0812 17:56:44.036531 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.037550 390563 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0812 17:56:44.038891 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0812 17:56:44.038914 390563 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0812 17:56:44.038972 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
W0812 17:56:44.041978 390563 out.go:239] ! Enabling 'volcano' returned an error: running callbacks: [volcano addon does not support crio]
I0812 17:56:44.046454 390563 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.31.0
I0812 17:56:44.048105 390563 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0812 17:56:44.048127 390563 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0812 17:56:44.048199 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.061718 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.063547 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.070197 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.071703 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.082272 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.082272 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.083362 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.084442 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.099801 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.102998 390563 out.go:177] - Using image docker.io/busybox:stable
I0812 17:56:44.103486 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.105270 390563 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0812 17:56:44.105515 390563 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0812 17:56:44.105533 390563 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0812 17:56:44.105587 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.106561 390563 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0812 17:56:44.106579 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0812 17:56:44.106630 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:44.113466 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.122765 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.123699 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:44.161159 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.30.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0812 17:56:44.465350 390563 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0812 17:56:44.465668 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0812 17:56:44.467801 390563 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0812 17:56:44.467823 390563 ssh_runner.go:362] scp helm-tiller/helm-tiller-rbac.yaml --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0812 17:56:44.553356 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0812 17:56:44.554711 390563 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0812 17:56:44.554781 390563 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0812 17:56:44.647906 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0812 17:56:44.651724 390563 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0812 17:56:44.651811 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0812 17:56:44.653931 390563 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0812 17:56:44.653961 390563 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0812 17:56:44.657071 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0812 17:56:44.666769 390563 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0812 17:56:44.666818 390563 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0812 17:56:44.669741 390563 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0812 17:56:44.669767 390563 ssh_runner.go:362] scp helm-tiller/helm-tiller-svc.yaml --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0812 17:56:44.747741 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0812 17:56:44.751326 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0812 17:56:44.857611 390563 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0812 17:56:44.857645 390563 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0812 17:56:44.861664 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0812 17:56:44.861691 390563 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0812 17:56:44.868537 390563 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0812 17:56:44.868582 390563 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0812 17:56:44.950526 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0812 17:56:44.964362 390563 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0812 17:56:44.964395 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0812 17:56:44.967828 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0812 17:56:44.970421 390563 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0812 17:56:44.970445 390563 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0812 17:56:45.054532 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0812 17:56:45.054579 390563 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0812 17:56:45.057020 390563 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0812 17:56:45.057046 390563 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0812 17:56:45.148728 390563 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0812 17:56:45.148759 390563 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0812 17:56:45.151745 390563 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0812 17:56:45.151813 390563 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0812 17:56:45.257704 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0812 17:56:45.257803 390563 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0812 17:56:45.262762 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0812 17:56:45.350135 390563 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0812 17:56:45.350167 390563 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0812 17:56:45.358441 390563 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0812 17:56:45.358470 390563 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0812 17:56:45.447822 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0812 17:56:45.447912 390563 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0812 17:56:45.553484 390563 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0812 17:56:45.553571 390563 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0812 17:56:45.647768 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0812 17:56:45.653504 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0812 17:56:45.653595 390563 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0812 17:56:45.659042 390563 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.30.3/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.497843664s)
I0812 17:56:45.659223 390563 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0812 17:56:45.659158 390563 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.193771213s)
I0812 17:56:45.660281 390563 node_ready.go:35] waiting up to 6m0s for node "addons-808918" to be "Ready" ...
I0812 17:56:45.669533 390563 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0812 17:56:45.669562 390563 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0812 17:56:45.748445 390563 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0812 17:56:45.748536 390563 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0812 17:56:45.947703 390563 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0812 17:56:45.947800 390563 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0812 17:56:46.149068 390563 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0812 17:56:46.149149 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0812 17:56:46.156066 390563 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0812 17:56:46.156148 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0812 17:56:46.168425 390563 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0812 17:56:46.168504 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0812 17:56:46.352208 390563 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-808918" context rescaled to 1 replicas
I0812 17:56:46.356526 390563 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0812 17:56:46.356565 390563 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0812 17:56:46.360766 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0812 17:56:46.447688 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0812 17:56:46.656580 390563 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0812 17:56:46.656681 390563 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0812 17:56:46.849449 390563 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0812 17:56:46.849535 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0812 17:56:46.948286 390563 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0812 17:56:46.948329 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0812 17:56:47.148586 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0812 17:56:47.256550 390563 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0812 17:56:47.256578 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0812 17:56:47.447807 390563 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0812 17:56:47.447841 390563 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0812 17:56:47.568544 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0812 17:56:47.771401 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:56:50.164586 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:56:50.549714 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (6.083998393s)
I0812 17:56:50.549767 390563 addons.go:475] Verifying addon ingress=true in "addons-808918"
I0812 17:56:50.549887 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (5.996481941s)
I0812 17:56:50.549998 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (5.902009061s)
I0812 17:56:50.550103 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (5.893004004s)
I0812 17:56:50.550155 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (5.802366406s)
I0812 17:56:50.550210 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (5.798853114s)
I0812 17:56:50.550254 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (5.599692569s)
I0812 17:56:50.550303 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (5.582450439s)
I0812 17:56:50.550351 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (5.287514988s)
I0812 17:56:50.550370 390563 addons.go:475] Verifying addon registry=true in "addons-808918"
I0812 17:56:50.550561 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (4.902693432s)
I0812 17:56:50.550868 390563 addons.go:475] Verifying addon metrics-server=true in "addons-808918"
I0812 17:56:50.554202 390563 out.go:177] * Verifying registry addon...
I0812 17:56:50.554249 390563 out.go:177] * Verifying ingress addon...
I0812 17:56:50.556816 390563 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0812 17:56:50.556816 390563 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
W0812 17:56:50.561841 390563 out.go:239] ! Enabling 'default-storageclass' returned an error: running callbacks: [Error making standard the default storage class: Error while marking storage class local-path as non-default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
I0812 17:56:50.564196 390563 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=registry
I0812 17:56:50.564216 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:50.564505 390563 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0812 17:56:50.564525 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:51.064425 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:51.064632 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:51.260643 390563 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0812 17:56:51.260725 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:51.280663 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:51.357518 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (4.996701321s)
W0812 17:56:51.357579 390563 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0812 17:56:51.357629 390563 retry.go:31] will retry after 125.338355ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0812 17:56:51.357707 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (4.909926532s)
I0812 17:56:51.358045 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (4.209395762s)
I0812 17:56:51.359107 390563 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-808918 service yakd-dashboard -n yakd-dashboard
I0812 17:56:51.483610 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0812 17:56:51.561115 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:51.561981 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:51.566350 390563 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0812 17:56:51.646543 390563 addons.go:234] Setting addon gcp-auth=true in "addons-808918"
I0812 17:56:51.646615 390563 host.go:66] Checking if "addons-808918" exists ...
I0812 17:56:51.647187 390563 cli_runner.go:164] Run: docker container inspect addons-808918 --format={{.State.Status}}
I0812 17:56:51.671411 390563 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0812 17:56:51.671475 390563 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-808918
I0812 17:56:51.692865 390563 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33138 SSHKeyPath:/home/jenkins/minikube-integration/19419-381930/.minikube/machines/addons-808918/id_rsa Username:docker}
I0812 17:56:51.986485 390563 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (4.417869588s)
I0812 17:56:51.986535 390563 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-808918"
I0812 17:56:51.988020 390563 out.go:177] * Verifying csi-hostpath-driver addon...
I0812 17:56:51.990093 390563 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0812 17:56:52.049969 390563 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0812 17:56:52.050024 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:52.061544 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:52.061710 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:52.385220 390563 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0812 17:56:52.386493 390563 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0812 17:56:52.387675 390563 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0812 17:56:52.387691 390563 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0812 17:56:52.404273 390563 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0812 17:56:52.404294 390563 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0812 17:56:52.420672 390563 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0812 17:56:52.420690 390563 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0812 17:56:52.436181 390563 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.3/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0812 17:56:52.495489 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:52.562426 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:52.563290 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:52.663590 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:56:52.763546 390563 addons.go:475] Verifying addon gcp-auth=true in "addons-808918"
I0812 17:56:52.764906 390563 out.go:177] * Verifying gcp-auth addon...
I0812 17:56:52.767074 390563 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0812 17:56:52.769134 390563 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0812 17:56:52.769150 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:52.994482 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:53.060413 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:53.060650 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:53.270364 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:53.494252 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:53.561954 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:53.562279 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:53.771894 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:54.049932 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:54.061843 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:54.062771 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:54.270644 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:54.495220 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:54.560469 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:54.560674 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:54.664371 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:56:54.770780 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:54.995626 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:55.060691 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:55.061064 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:55.270948 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:55.494811 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:55.560849 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:55.561090 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:55.770588 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:55.994487 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:56.060440 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:56.060917 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:56.270982 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:56.494815 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:56.560745 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:56.560911 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:56.770119 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:56.994784 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:57.060875 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:57.061063 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:57.169280 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:56:57.270807 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:57.494432 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:57.560639 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:57.560639 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:57.770968 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:57.994751 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:58.060626 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:58.060728 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:58.270989 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:58.496949 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:58.560840 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:58.561061 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:58.771121 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:58.994672 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:59.060846 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:59.061007 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:59.270716 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:59.494475 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:56:59.560326 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:56:59.560516 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:56:59.664031 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:56:59.770832 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:56:59.994537 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:00.060446 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:00.060622 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:00.270708 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:00.494356 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:00.561149 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:00.561207 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:00.770512 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:00.994417 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:01.060366 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:01.060559 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:01.270953 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:01.494562 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:01.560481 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:01.560665 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:01.770989 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:01.994649 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:02.060699 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:02.060884 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:02.163319 390563 node_ready.go:53] node "addons-808918" has status "Ready":"False"
I0812 17:57:02.270852 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:02.494486 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:02.560487 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:02.560657 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:02.771803 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:02.994929 390563 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0812 17:57:02.994960 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:03.073626 390563 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0812 17:57:03.073730 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:03.074907 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:03.164013 390563 node_ready.go:49] node "addons-808918" has status "Ready":"True"
I0812 17:57:03.164044 390563 node_ready.go:38] duration metric: took 17.503702041s for node "addons-808918" to be "Ready" ...
I0812 17:57:03.164058 390563 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0812 17:57:03.173976 390563 pod_ready.go:78] waiting up to 6m0s for pod "coredns-7db6d8ff4d-k9fzz" in "kube-system" namespace to be "Ready" ...
I0812 17:57:03.270554 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:03.497192 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:03.562265 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:03.563943 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:03.770847 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:04.052417 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:04.062272 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:04.063914 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:04.180661 390563 pod_ready.go:92] pod "coredns-7db6d8ff4d-k9fzz" in "kube-system" namespace has status "Ready":"True"
I0812 17:57:04.180690 390563 pod_ready.go:81] duration metric: took 1.006689071s for pod "coredns-7db6d8ff4d-k9fzz" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.180716 390563 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.252001 390563 pod_ready.go:92] pod "etcd-addons-808918" in "kube-system" namespace has status "Ready":"True"
I0812 17:57:04.252028 390563 pod_ready.go:81] duration metric: took 71.303335ms for pod "etcd-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.252045 390563 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.256560 390563 pod_ready.go:92] pod "kube-apiserver-addons-808918" in "kube-system" namespace has status "Ready":"True"
I0812 17:57:04.256585 390563 pod_ready.go:81] duration metric: took 4.531355ms for pod "kube-apiserver-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.256597 390563 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.261371 390563 pod_ready.go:92] pod "kube-controller-manager-addons-808918" in "kube-system" namespace has status "Ready":"True"
I0812 17:57:04.261394 390563 pod_ready.go:81] duration metric: took 4.788161ms for pod "kube-controller-manager-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.261409 390563 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-gq98h" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.269880 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:04.364870 390563 pod_ready.go:92] pod "kube-proxy-gq98h" in "kube-system" namespace has status "Ready":"True"
I0812 17:57:04.364895 390563 pod_ready.go:81] duration metric: took 103.47911ms for pod "kube-proxy-gq98h" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.364906 390563 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.496366 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:04.561841 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:04.561871 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:04.764870 390563 pod_ready.go:92] pod "kube-scheduler-addons-808918" in "kube-system" namespace has status "Ready":"True"
I0812 17:57:04.764894 390563 pod_ready.go:81] duration metric: took 399.982151ms for pod "kube-scheduler-addons-808918" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.764904 390563 pod_ready.go:78] waiting up to 6m0s for pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace to be "Ready" ...
I0812 17:57:04.770607 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:04.996375 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:05.062469 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:05.062514 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:05.270591 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:05.496038 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:05.563789 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:05.565118 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:05.770499 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:05.996908 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:06.061981 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:06.062167 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:06.270895 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:06.496443 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:06.561893 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:06.561992 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:06.771024 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:06.771270 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:06.997022 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:07.062977 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:07.063681 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:07.270218 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:07.496351 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:07.561785 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:07.562174 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:07.771052 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:07.995899 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:08.061092 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:08.061400 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:08.270150 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:08.496287 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:08.561188 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:08.561508 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:08.770399 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:08.998238 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:09.060954 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:09.061196 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:09.269634 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:09.269854 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:09.495655 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:09.561517 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:09.561860 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:09.770320 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:09.994704 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:10.061665 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:10.061834 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:10.270723 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:10.495252 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:10.561468 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:10.561697 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0812 17:57:10.770641 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:10.996496 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:11.061554 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:11.062366 390563 kapi.go:107] duration metric: took 20.505542061s to wait for kubernetes.io/minikube-addons=registry ...
I0812 17:57:11.270414 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:11.270492 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:11.495774 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:11.561384 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:11.772301 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:11.995575 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:12.062139 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:12.270410 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:12.495329 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:12.561653 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:12.770175 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:12.996223 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:13.061390 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:13.271054 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:13.271114 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:13.496801 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:13.561409 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:13.770495 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:13.996175 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:14.063123 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:14.270663 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:14.551339 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:14.561551 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:14.770790 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:14.995884 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:15.060773 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:15.270108 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:15.494608 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:15.562074 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:15.769924 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:15.770439 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:15.995232 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:16.062068 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:16.270822 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:16.551051 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:16.561960 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:16.770346 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:17.048346 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:17.061435 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:17.270140 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:17.496236 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:17.561432 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:17.770947 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:17.771330 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:17.996300 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:18.061415 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:18.275900 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:18.495540 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:18.562059 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:18.770394 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:18.995751 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:19.061972 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:19.270675 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:19.496385 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:19.561710 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:19.772229 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:19.773645 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:19.995484 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:20.061367 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:20.270636 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:20.495903 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:20.561504 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:20.769977 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:20.996321 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:21.061419 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:21.270869 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:21.495551 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:21.562129 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:21.769740 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:21.995211 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:22.060915 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:22.270304 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:22.270381 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:22.498100 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:22.560927 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:22.770766 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:23.051671 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:23.061803 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:23.271488 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:23.495430 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:23.561376 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:23.771165 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:23.996385 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:24.061509 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:24.270540 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:24.270648 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:24.495432 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:24.561923 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:24.770309 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:24.996301 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:25.062181 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:25.270667 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:25.495222 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:25.561167 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:25.770681 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:25.997112 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:26.061089 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:26.270541 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:26.494916 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:26.560652 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:26.769861 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:26.769934 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:26.995478 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:27.061313 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:27.270519 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:27.496372 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:27.561625 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:27.770852 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:27.996961 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:28.061243 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:28.270509 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:28.496219 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:28.560732 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:28.770857 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:28.770941 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:28.995619 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:29.061587 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:29.270813 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:29.496520 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:29.561485 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:29.770195 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:29.996069 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:30.061588 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:30.270831 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:30.496128 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:30.560943 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:30.770335 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:30.994578 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:31.061303 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:31.270632 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:31.270753 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:31.496347 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:31.560905 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:31.770559 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:31.995205 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:32.060839 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:32.270164 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:32.496187 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:32.561837 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:32.770254 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:32.997157 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:33.060762 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:33.270937 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:33.271135 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:33.496414 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:33.561893 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:33.771054 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:33.995988 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:34.061588 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:34.270829 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:34.496234 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:34.561007 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:34.770526 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:34.995441 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:35.061122 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:35.270461 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:35.494719 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:35.561402 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:35.772362 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:35.772834 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:36.050755 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:36.061580 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:36.272316 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:36.551983 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:36.562082 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:36.772731 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:37.053171 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:37.062681 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:37.270930 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:37.549991 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:37.562055 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:37.770068 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:37.995665 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:38.061417 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:38.270540 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:38.271251 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:38.495920 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:38.560863 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:38.770786 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:38.996387 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:39.061103 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:39.272449 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:39.496558 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:39.561094 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:39.770720 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:39.995956 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:40.060786 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:40.270312 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:40.495746 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:40.561523 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:40.771260 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:40.771260 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:40.995646 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:41.061425 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:41.270918 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:41.497008 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:41.561378 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:41.770616 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:41.995182 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:42.061227 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:42.272784 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:42.495330 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:42.561564 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:42.771238 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:42.771858 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:42.995940 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:43.060945 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:43.270173 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:43.498507 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:43.561587 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:43.770551 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:43.996690 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:44.060986 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:44.270581 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:44.496359 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:44.562282 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:44.770265 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:45.149752 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:45.150212 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:45.272262 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:45.272968 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:45.554176 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:45.564138 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:45.770740 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:46.052272 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:46.062516 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:46.355390 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:46.552121 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:46.568425 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:46.850613 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:47.149630 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:47.156770 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:47.270492 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:47.496177 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:47.562195 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:47.770504 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:47.770692 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:47.994937 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:48.060539 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:48.270364 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:48.496568 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:48.560961 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:48.770399 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:48.996631 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:49.061833 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:49.270546 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:49.496090 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:49.560827 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:49.770339 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:49.770961 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:49.995537 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:50.061215 390563 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0812 17:57:50.269771 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:50.495630 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:50.561938 390563 kapi.go:107] duration metric: took 1m0.005119162s to wait for app.kubernetes.io/name=ingress-nginx ...
I0812 17:57:50.770529 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:50.995282 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:51.270474 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0812 17:57:51.494964 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:51.770758 390563 kapi.go:107] duration metric: took 59.003681965s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0812 17:57:51.771130 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:51.772696 390563 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-808918 cluster.
I0812 17:57:51.774390 390563 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0812 17:57:51.775717 390563 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0812 17:57:52.052050 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:52.496328 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:52.995198 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:53.495394 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:53.996928 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:54.271449 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:54.496477 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:54.994924 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:55.497311 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:55.995538 390563 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0812 17:57:56.495319 390563 kapi.go:107] duration metric: took 1m4.505224343s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0812 17:57:56.496923 390563 out.go:177] * Enabled addons: ingress-dns, storage-provisioner, nvidia-device-plugin, cloud-spanner, helm-tiller, metrics-server, storage-provisioner-rancher, inspektor-gadget, yakd, volumesnapshots, registry, ingress, gcp-auth, csi-hostpath-driver
I0812 17:57:56.498146 390563 addons.go:510] duration metric: took 1m12.568352093s for enable addons: enabled=[ingress-dns storage-provisioner nvidia-device-plugin cloud-spanner helm-tiller metrics-server storage-provisioner-rancher inspektor-gadget yakd volumesnapshots registry ingress gcp-auth csi-hostpath-driver]
I0812 17:57:56.770845 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:57:59.270358 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:01.271468 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:03.770883 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:05.771276 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:08.270207 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:10.770844 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:13.270537 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:15.770819 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:17.770888 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:19.772028 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:22.269974 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:24.270328 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:26.270407 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:28.770976 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:31.270165 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:33.270225 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:35.271170 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:37.770419 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:40.269973 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:42.270819 390563 pod_ready.go:102] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"False"
I0812 17:58:43.770977 390563 pod_ready.go:92] pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace has status "Ready":"True"
I0812 17:58:43.771005 390563 pod_ready.go:81] duration metric: took 1m39.006094451s for pod "metrics-server-c59844bb4-5vl77" in "kube-system" namespace to be "Ready" ...
I0812 17:58:43.771016 390563 pod_ready.go:78] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-7wrl9" in "kube-system" namespace to be "Ready" ...
I0812 17:58:43.775229 390563 pod_ready.go:92] pod "nvidia-device-plugin-daemonset-7wrl9" in "kube-system" namespace has status "Ready":"True"
I0812 17:58:43.775252 390563 pod_ready.go:81] duration metric: took 4.228901ms for pod "nvidia-device-plugin-daemonset-7wrl9" in "kube-system" namespace to be "Ready" ...
I0812 17:58:43.775270 390563 pod_ready.go:38] duration metric: took 1m40.6111795s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0812 17:58:43.775290 390563 api_server.go:52] waiting for apiserver process to appear ...
I0812 17:58:43.775321 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0812 17:58:43.775372 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0812 17:58:43.809204 390563 cri.go:89] found id: "b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742"
I0812 17:58:43.809223 390563 cri.go:89] found id: ""
I0812 17:58:43.809231 390563 logs.go:276] 1 containers: [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742]
I0812 17:58:43.809273 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:43.812564 390563 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0812 17:58:43.812622 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0812 17:58:43.845860 390563 cri.go:89] found id: "a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b"
I0812 17:58:43.845886 390563 cri.go:89] found id: ""
I0812 17:58:43.845895 390563 logs.go:276] 1 containers: [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b]
I0812 17:58:43.845949 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:43.849180 390563 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0812 17:58:43.849244 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0812 17:58:43.882646 390563 cri.go:89] found id: "d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca"
I0812 17:58:43.882675 390563 cri.go:89] found id: ""
I0812 17:58:43.882688 390563 logs.go:276] 1 containers: [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca]
I0812 17:58:43.882737 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:43.886077 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0812 17:58:43.886129 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0812 17:58:43.919383 390563 cri.go:89] found id: "5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4"
I0812 17:58:43.919410 390563 cri.go:89] found id: ""
I0812 17:58:43.919421 390563 logs.go:276] 1 containers: [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4]
I0812 17:58:43.919480 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:43.922682 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0812 17:58:43.922743 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0812 17:58:43.955044 390563 cri.go:89] found id: "f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5"
I0812 17:58:43.955071 390563 cri.go:89] found id: ""
I0812 17:58:43.955081 390563 logs.go:276] 1 containers: [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5]
I0812 17:58:43.955136 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:43.958347 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0812 17:58:43.958420 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0812 17:58:43.990587 390563 cri.go:89] found id: "f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0"
I0812 17:58:43.990609 390563 cri.go:89] found id: ""
I0812 17:58:43.990619 390563 logs.go:276] 1 containers: [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0]
I0812 17:58:43.990679 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:43.994111 390563 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0812 17:58:43.994177 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0812 17:58:44.029140 390563 cri.go:89] found id: "975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33"
I0812 17:58:44.029159 390563 cri.go:89] found id: ""
I0812 17:58:44.029167 390563 logs.go:276] 1 containers: [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33]
I0812 17:58:44.029210 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:44.032385 390563 logs.go:123] Gathering logs for CRI-O ...
I0812 17:58:44.032407 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0812 17:58:44.110839 390563 logs.go:123] Gathering logs for container status ...
I0812 17:58:44.110878 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0812 17:58:44.153177 390563 logs.go:123] Gathering logs for kubelet ...
I0812 17:58:44.153209 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0812 17:58:44.220654 390563 logs.go:123] Gathering logs for dmesg ...
I0812 17:58:44.220692 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0812 17:58:44.246388 390563 logs.go:123] Gathering logs for kube-apiserver [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742] ...
I0812 17:58:44.246422 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742"
I0812 17:58:44.289683 390563 logs.go:123] Gathering logs for coredns [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca] ...
I0812 17:58:44.289716 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca"
I0812 17:58:44.326204 390563 logs.go:123] Gathering logs for kube-controller-manager [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0] ...
I0812 17:58:44.326238 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0"
I0812 17:58:44.382593 390563 logs.go:123] Gathering logs for kindnet [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33] ...
I0812 17:58:44.382628 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33"
I0812 17:58:44.423704 390563 logs.go:123] Gathering logs for describe nodes ...
I0812 17:58:44.423739 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0812 17:58:44.520927 390563 logs.go:123] Gathering logs for etcd [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b] ...
I0812 17:58:44.520960 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b"
I0812 17:58:44.564728 390563 logs.go:123] Gathering logs for kube-scheduler [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4] ...
I0812 17:58:44.564757 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4"
I0812 17:58:44.604380 390563 logs.go:123] Gathering logs for kube-proxy [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5] ...
I0812 17:58:44.604414 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5"
I0812 17:58:47.137306 390563 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0812 17:58:47.151349 390563 api_server.go:72] duration metric: took 2m3.221602847s to wait for apiserver process to appear ...
I0812 17:58:47.151384 390563 api_server.go:88] waiting for apiserver healthz status ...
I0812 17:58:47.151419 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0812 17:58:47.151462 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0812 17:58:47.183483 390563 cri.go:89] found id: "b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742"
I0812 17:58:47.183503 390563 cri.go:89] found id: ""
I0812 17:58:47.183510 390563 logs.go:276] 1 containers: [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742]
I0812 17:58:47.183567 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.186872 390563 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0812 17:58:47.186931 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0812 17:58:47.220219 390563 cri.go:89] found id: "a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b"
I0812 17:58:47.220240 390563 cri.go:89] found id: ""
I0812 17:58:47.220248 390563 logs.go:276] 1 containers: [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b]
I0812 17:58:47.220301 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.223574 390563 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0812 17:58:47.223644 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0812 17:58:47.256438 390563 cri.go:89] found id: "d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca"
I0812 17:58:47.256463 390563 cri.go:89] found id: ""
I0812 17:58:47.256471 390563 logs.go:276] 1 containers: [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca]
I0812 17:58:47.256514 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.259825 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0812 17:58:47.259880 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0812 17:58:47.292370 390563 cri.go:89] found id: "5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4"
I0812 17:58:47.292393 390563 cri.go:89] found id: ""
I0812 17:58:47.292403 390563 logs.go:276] 1 containers: [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4]
I0812 17:58:47.292457 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.295798 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0812 17:58:47.295866 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0812 17:58:47.329445 390563 cri.go:89] found id: "f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5"
I0812 17:58:47.329468 390563 cri.go:89] found id: ""
I0812 17:58:47.329477 390563 logs.go:276] 1 containers: [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5]
I0812 17:58:47.329526 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.332790 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0812 17:58:47.332844 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0812 17:58:47.366998 390563 cri.go:89] found id: "f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0"
I0812 17:58:47.367021 390563 cri.go:89] found id: ""
I0812 17:58:47.367030 390563 logs.go:276] 1 containers: [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0]
I0812 17:58:47.367073 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.370395 390563 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0812 17:58:47.370456 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0812 17:58:47.404045 390563 cri.go:89] found id: "975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33"
I0812 17:58:47.404070 390563 cri.go:89] found id: ""
I0812 17:58:47.404080 390563 logs.go:276] 1 containers: [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33]
I0812 17:58:47.404141 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:47.407570 390563 logs.go:123] Gathering logs for coredns [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca] ...
I0812 17:58:47.407593 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca"
I0812 17:58:47.443300 390563 logs.go:123] Gathering logs for kube-scheduler [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4] ...
I0812 17:58:47.443331 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4"
I0812 17:58:47.482608 390563 logs.go:123] Gathering logs for kube-controller-manager [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0] ...
I0812 17:58:47.482639 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0"
I0812 17:58:47.538339 390563 logs.go:123] Gathering logs for kindnet [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33] ...
I0812 17:58:47.538372 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33"
I0812 17:58:47.578495 390563 logs.go:123] Gathering logs for container status ...
I0812 17:58:47.578529 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0812 17:58:47.620501 390563 logs.go:123] Gathering logs for kubelet ...
I0812 17:58:47.620529 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0812 17:58:47.689495 390563 logs.go:123] Gathering logs for etcd [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b] ...
I0812 17:58:47.689536 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b"
I0812 17:58:47.733364 390563 logs.go:123] Gathering logs for kube-apiserver [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742] ...
I0812 17:58:47.733400 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742"
I0812 17:58:47.777653 390563 logs.go:123] Gathering logs for kube-proxy [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5] ...
I0812 17:58:47.777689 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5"
I0812 17:58:47.810603 390563 logs.go:123] Gathering logs for CRI-O ...
I0812 17:58:47.810638 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0812 17:58:47.884555 390563 logs.go:123] Gathering logs for dmesg ...
I0812 17:58:47.884592 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0812 17:58:47.912023 390563 logs.go:123] Gathering logs for describe nodes ...
I0812 17:58:47.912051 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0812 17:58:50.506637 390563 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0812 17:58:50.510241 390563 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0812 17:58:50.511180 390563 api_server.go:141] control plane version: v1.30.3
I0812 17:58:50.511221 390563 api_server.go:131] duration metric: took 3.359829706s to wait for apiserver health ...
I0812 17:58:50.511232 390563 system_pods.go:43] waiting for kube-system pods to appear ...
I0812 17:58:50.511260 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0812 17:58:50.511317 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0812 17:58:50.545502 390563 cri.go:89] found id: "b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742"
I0812 17:58:50.545524 390563 cri.go:89] found id: ""
I0812 17:58:50.545532 390563 logs.go:276] 1 containers: [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742]
I0812 17:58:50.545589 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.549097 390563 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0812 17:58:50.549168 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0812 17:58:50.581832 390563 cri.go:89] found id: "a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b"
I0812 17:58:50.581858 390563 cri.go:89] found id: ""
I0812 17:58:50.581867 390563 logs.go:276] 1 containers: [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b]
I0812 17:58:50.581913 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.585326 390563 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0812 17:58:50.585396 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0812 17:58:50.617666 390563 cri.go:89] found id: "d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca"
I0812 17:58:50.617691 390563 cri.go:89] found id: ""
I0812 17:58:50.617704 390563 logs.go:276] 1 containers: [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca]
I0812 17:58:50.617754 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.620900 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0812 17:58:50.620959 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0812 17:58:50.653724 390563 cri.go:89] found id: "5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4"
I0812 17:58:50.653749 390563 cri.go:89] found id: ""
I0812 17:58:50.653759 390563 logs.go:276] 1 containers: [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4]
I0812 17:58:50.653811 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.657039 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0812 17:58:50.657096 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0812 17:58:50.689747 390563 cri.go:89] found id: "f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5"
I0812 17:58:50.689774 390563 cri.go:89] found id: ""
I0812 17:58:50.689783 390563 logs.go:276] 1 containers: [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5]
I0812 17:58:50.689849 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.693316 390563 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0812 17:58:50.693383 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0812 17:58:50.726612 390563 cri.go:89] found id: "f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0"
I0812 17:58:50.726638 390563 cri.go:89] found id: ""
I0812 17:58:50.726647 390563 logs.go:276] 1 containers: [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0]
I0812 17:58:50.726698 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.730051 390563 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0812 17:58:50.730112 390563 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0812 17:58:50.763224 390563 cri.go:89] found id: "975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33"
I0812 17:58:50.763246 390563 cri.go:89] found id: ""
I0812 17:58:50.763254 390563 logs.go:276] 1 containers: [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33]
I0812 17:58:50.763307 390563 ssh_runner.go:195] Run: which crictl
I0812 17:58:50.766628 390563 logs.go:123] Gathering logs for CRI-O ...
I0812 17:58:50.766654 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0812 17:58:50.842899 390563 logs.go:123] Gathering logs for container status ...
I0812 17:58:50.842939 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0812 17:58:50.883331 390563 logs.go:123] Gathering logs for describe nodes ...
I0812 17:58:50.883360 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.3/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0812 17:58:50.977224 390563 logs.go:123] Gathering logs for etcd [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b] ...
I0812 17:58:50.977255 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b"
I0812 17:58:51.019928 390563 logs.go:123] Gathering logs for kube-scheduler [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4] ...
I0812 17:58:51.019960 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4"
I0812 17:58:51.059028 390563 logs.go:123] Gathering logs for kube-controller-manager [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0] ...
I0812 17:58:51.059056 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0"
I0812 17:58:51.113750 390563 logs.go:123] Gathering logs for kube-proxy [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5] ...
I0812 17:58:51.113784 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5"
I0812 17:58:51.147910 390563 logs.go:123] Gathering logs for kindnet [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33] ...
I0812 17:58:51.147935 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33"
I0812 17:58:51.188964 390563 logs.go:123] Gathering logs for kubelet ...
I0812 17:58:51.188994 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0812 17:58:51.257347 390563 logs.go:123] Gathering logs for dmesg ...
I0812 17:58:51.257384 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0812 17:58:51.283038 390563 logs.go:123] Gathering logs for kube-apiserver [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742] ...
I0812 17:58:51.283075 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742"
I0812 17:58:51.326307 390563 logs.go:123] Gathering logs for coredns [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca] ...
I0812 17:58:51.326339 390563 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca"
I0812 17:58:53.875430 390563 system_pods.go:59] 19 kube-system pods found
I0812 17:58:53.875461 390563 system_pods.go:61] "coredns-7db6d8ff4d-k9fzz" [7ecbdf95-7098-417e-80c9-93418d94eff4] Running
I0812 17:58:53.875466 390563 system_pods.go:61] "csi-hostpath-attacher-0" [8a39a2ce-633c-42e7-ab33-b77e0a45ba2a] Running
I0812 17:58:53.875471 390563 system_pods.go:61] "csi-hostpath-resizer-0" [620a03c9-f9da-406e-816b-507f8015ad5d] Running
I0812 17:58:53.875474 390563 system_pods.go:61] "csi-hostpathplugin-g2gl5" [7c7fbf52-5654-45f6-a4ea-6ddb8934d9a9] Running
I0812 17:58:53.875478 390563 system_pods.go:61] "etcd-addons-808918" [f2ab8b4d-94b2-4772-b9d7-87cd87a3ff7a] Running
I0812 17:58:53.875481 390563 system_pods.go:61] "kindnet-ssl56" [0d4b6fc7-1e16-40ee-aa87-d18e020dc734] Running
I0812 17:58:53.875485 390563 system_pods.go:61] "kube-apiserver-addons-808918" [f7c75b7c-2561-4198-952c-85452e8f0d6d] Running
I0812 17:58:53.875488 390563 system_pods.go:61] "kube-controller-manager-addons-808918" [2c8971bf-406d-4122-941f-aeae4fe305ea] Running
I0812 17:58:53.875491 390563 system_pods.go:61] "kube-ingress-dns-minikube" [2ee227fa-4a7a-4608-973b-6fb4ed08f281] Running
I0812 17:58:53.875495 390563 system_pods.go:61] "kube-proxy-gq98h" [af1f17d3-f323-4a30-9d1c-b48c5d6566cc] Running
I0812 17:58:53.875499 390563 system_pods.go:61] "kube-scheduler-addons-808918" [9782f8d1-f921-49e5-bfec-ccd1bba260b3] Running
I0812 17:58:53.875504 390563 system_pods.go:61] "metrics-server-c59844bb4-5vl77" [06131209-4466-4549-9ec2-884be212e6da] Running
I0812 17:58:53.875512 390563 system_pods.go:61] "nvidia-device-plugin-daemonset-7wrl9" [1a324a3a-b1f4-4dec-8237-29ff64b3aee0] Running
I0812 17:58:53.875517 390563 system_pods.go:61] "registry-698f998955-l2xhc" [4afae8e5-0b35-4a54-a3fb-7fdfabfc290d] Running
I0812 17:58:53.875528 390563 system_pods.go:61] "registry-proxy-vvb8n" [2c8acbf5-88cf-4f84-8a6f-726b9bc28164] Running
I0812 17:58:53.875532 390563 system_pods.go:61] "snapshot-controller-745499f584-ml8p9" [ddbb21fe-2713-4370-86a5-cffa138e0f13] Running
I0812 17:58:53.875537 390563 system_pods.go:61] "snapshot-controller-745499f584-txzn6" [29f5cd61-ae32-47ba-903b-830028bac519] Running
I0812 17:58:53.875542 390563 system_pods.go:61] "storage-provisioner" [f6550f63-491f-4cfd-bdad-2f7ecb7bbc6a] Running
I0812 17:58:53.875546 390563 system_pods.go:61] "tiller-deploy-6677d64bcd-xgzk5" [dc8fa4a7-5a0d-4d21-a2e7-9f4a977b6008] Running
I0812 17:58:53.875555 390563 system_pods.go:74] duration metric: took 3.3643155s to wait for pod list to return data ...
I0812 17:58:53.875566 390563 default_sa.go:34] waiting for default service account to be created ...
I0812 17:58:53.877307 390563 default_sa.go:45] found service account: "default"
I0812 17:58:53.877326 390563 default_sa.go:55] duration metric: took 1.75297ms for default service account to be created ...
I0812 17:58:53.877333 390563 system_pods.go:116] waiting for k8s-apps to be running ...
I0812 17:58:53.886140 390563 system_pods.go:86] 19 kube-system pods found
I0812 17:58:53.886164 390563 system_pods.go:89] "coredns-7db6d8ff4d-k9fzz" [7ecbdf95-7098-417e-80c9-93418d94eff4] Running
I0812 17:58:53.886170 390563 system_pods.go:89] "csi-hostpath-attacher-0" [8a39a2ce-633c-42e7-ab33-b77e0a45ba2a] Running
I0812 17:58:53.886174 390563 system_pods.go:89] "csi-hostpath-resizer-0" [620a03c9-f9da-406e-816b-507f8015ad5d] Running
I0812 17:58:53.886179 390563 system_pods.go:89] "csi-hostpathplugin-g2gl5" [7c7fbf52-5654-45f6-a4ea-6ddb8934d9a9] Running
I0812 17:58:53.886185 390563 system_pods.go:89] "etcd-addons-808918" [f2ab8b4d-94b2-4772-b9d7-87cd87a3ff7a] Running
I0812 17:58:53.886192 390563 system_pods.go:89] "kindnet-ssl56" [0d4b6fc7-1e16-40ee-aa87-d18e020dc734] Running
I0812 17:58:53.886202 390563 system_pods.go:89] "kube-apiserver-addons-808918" [f7c75b7c-2561-4198-952c-85452e8f0d6d] Running
I0812 17:58:53.886212 390563 system_pods.go:89] "kube-controller-manager-addons-808918" [2c8971bf-406d-4122-941f-aeae4fe305ea] Running
I0812 17:58:53.886221 390563 system_pods.go:89] "kube-ingress-dns-minikube" [2ee227fa-4a7a-4608-973b-6fb4ed08f281] Running
I0812 17:58:53.886229 390563 system_pods.go:89] "kube-proxy-gq98h" [af1f17d3-f323-4a30-9d1c-b48c5d6566cc] Running
I0812 17:58:53.886234 390563 system_pods.go:89] "kube-scheduler-addons-808918" [9782f8d1-f921-49e5-bfec-ccd1bba260b3] Running
I0812 17:58:53.886240 390563 system_pods.go:89] "metrics-server-c59844bb4-5vl77" [06131209-4466-4549-9ec2-884be212e6da] Running
I0812 17:58:53.886245 390563 system_pods.go:89] "nvidia-device-plugin-daemonset-7wrl9" [1a324a3a-b1f4-4dec-8237-29ff64b3aee0] Running
I0812 17:58:53.886253 390563 system_pods.go:89] "registry-698f998955-l2xhc" [4afae8e5-0b35-4a54-a3fb-7fdfabfc290d] Running
I0812 17:58:53.886257 390563 system_pods.go:89] "registry-proxy-vvb8n" [2c8acbf5-88cf-4f84-8a6f-726b9bc28164] Running
I0812 17:58:53.886264 390563 system_pods.go:89] "snapshot-controller-745499f584-ml8p9" [ddbb21fe-2713-4370-86a5-cffa138e0f13] Running
I0812 17:58:53.886268 390563 system_pods.go:89] "snapshot-controller-745499f584-txzn6" [29f5cd61-ae32-47ba-903b-830028bac519] Running
I0812 17:58:53.886275 390563 system_pods.go:89] "storage-provisioner" [f6550f63-491f-4cfd-bdad-2f7ecb7bbc6a] Running
I0812 17:58:53.886283 390563 system_pods.go:89] "tiller-deploy-6677d64bcd-xgzk5" [dc8fa4a7-5a0d-4d21-a2e7-9f4a977b6008] Running
I0812 17:58:53.886294 390563 system_pods.go:126] duration metric: took 8.955191ms to wait for k8s-apps to be running ...
I0812 17:58:53.886306 390563 system_svc.go:44] waiting for kubelet service to be running ....
I0812 17:58:53.886360 390563 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0812 17:58:53.897444 390563 system_svc.go:56] duration metric: took 11.132132ms WaitForService to wait for kubelet
I0812 17:58:53.897470 390563 kubeadm.go:582] duration metric: took 2m9.967729017s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0812 17:58:53.897494 390563 node_conditions.go:102] verifying NodePressure condition ...
I0812 17:58:53.900197 390563 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0812 17:58:53.900223 390563 node_conditions.go:123] node cpu capacity is 8
I0812 17:58:53.900240 390563 node_conditions.go:105] duration metric: took 2.740471ms to run NodePressure ...
I0812 17:58:53.900255 390563 start.go:241] waiting for startup goroutines ...
I0812 17:58:53.900265 390563 start.go:246] waiting for cluster config update ...
I0812 17:58:53.900287 390563 start.go:255] writing updated cluster config ...
I0812 17:58:53.900568 390563 ssh_runner.go:195] Run: rm -f paused
I0812 17:58:53.950159 390563 start.go:600] kubectl: 1.30.3, cluster: 1.30.3 (minor skew: 0)
I0812 17:58:53.952484 390563 out.go:177] * Done! kubectl is now configured to use "addons-808918" cluster and "default" namespace by default
==> CRI-O <==
Aug 12 18:02:25 addons-808918 crio[1029]: time="2024-08-12 18:02:25.809120648Z" level=info msg="Got pod network &{Name:ingress-nginx-controller-6d9bd977d4-7f6cj Namespace:ingress-nginx ID:57feaccddeb5d2d66dd75e510029aab40142600ca7b55c45924689d23db15888 UID:bb94187a-d528-48d4-9174-84f9ba45e9f0 NetNS:/var/run/netns/5dc0af64-aedd-460d-b4e0-1911dd802173 Networks:[{Name:kindnet Ifname:eth0}] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Aug 12 18:02:25 addons-808918 crio[1029]: time="2024-08-12 18:02:25.809258251Z" level=info msg="Deleting pod ingress-nginx_ingress-nginx-controller-6d9bd977d4-7f6cj from CNI network \"kindnet\" (type=ptp)"
Aug 12 18:02:25 addons-808918 crio[1029]: time="2024-08-12 18:02:25.847520172Z" level=info msg="Stopped pod sandbox: 57feaccddeb5d2d66dd75e510029aab40142600ca7b55c45924689d23db15888" id=4cffb49c-379d-40dd-9771-6b01e8ac59d3 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:26 addons-808918 crio[1029]: time="2024-08-12 18:02:26.144610939Z" level=info msg="Removing container: 76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8" id=90e34dd6-8ecf-404b-b319-dbe2ea645fb8 name=/runtime.v1.RuntimeService/RemoveContainer
Aug 12 18:02:26 addons-808918 crio[1029]: time="2024-08-12 18:02:26.157701300Z" level=info msg="Removed container 76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8: ingress-nginx/ingress-nginx-controller-6d9bd977d4-7f6cj/controller" id=90e34dd6-8ecf-404b-b319-dbe2ea645fb8 name=/runtime.v1.RuntimeService/RemoveContainer
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.044747112Z" level=info msg="Removing container: 592b81b132a6a35727d1282d0b6a97fdd003fa9fcef645145ce0585398584b7b" id=97ad5931-5527-4d4d-84a2-fdda7b388410 name=/runtime.v1.RuntimeService/RemoveContainer
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.058923843Z" level=info msg="Removed container 592b81b132a6a35727d1282d0b6a97fdd003fa9fcef645145ce0585398584b7b: ingress-nginx/ingress-nginx-admission-patch-l95bm/patch" id=97ad5931-5527-4d4d-84a2-fdda7b388410 name=/runtime.v1.RuntimeService/RemoveContainer
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.060368436Z" level=info msg="Removing container: c0a00532c52c9dd30faa3ecd38a30bfe1496804e259de8333ad69068c4060d5a" id=79e772b1-c726-446e-8384-320c734073cc name=/runtime.v1.RuntimeService/RemoveContainer
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.074904661Z" level=info msg="Removed container c0a00532c52c9dd30faa3ecd38a30bfe1496804e259de8333ad69068c4060d5a: ingress-nginx/ingress-nginx-admission-create-dhrdt/create" id=79e772b1-c726-446e-8384-320c734073cc name=/runtime.v1.RuntimeService/RemoveContainer
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.076304399Z" level=info msg="Stopping pod sandbox: 57feaccddeb5d2d66dd75e510029aab40142600ca7b55c45924689d23db15888" id=df633b1b-dd32-48ab-9d18-ea941336e1d9 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.076342943Z" level=info msg="Stopped pod sandbox (already stopped): 57feaccddeb5d2d66dd75e510029aab40142600ca7b55c45924689d23db15888" id=df633b1b-dd32-48ab-9d18-ea941336e1d9 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.076644580Z" level=info msg="Removing pod sandbox: 57feaccddeb5d2d66dd75e510029aab40142600ca7b55c45924689d23db15888" id=340f46ce-c468-4ad3-99f9-42978ae88396 name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.083793324Z" level=info msg="Removed pod sandbox: 57feaccddeb5d2d66dd75e510029aab40142600ca7b55c45924689d23db15888" id=340f46ce-c468-4ad3-99f9-42978ae88396 name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.084220102Z" level=info msg="Stopping pod sandbox: 3d4aa53ef6e7b05ca27449be9bd12f9aa67e1717b78302102b68eb21fa46591f" id=7f01ef66-50fd-498a-b543-7ac656e35db5 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.084257364Z" level=info msg="Stopped pod sandbox (already stopped): 3d4aa53ef6e7b05ca27449be9bd12f9aa67e1717b78302102b68eb21fa46591f" id=7f01ef66-50fd-498a-b543-7ac656e35db5 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.084584116Z" level=info msg="Removing pod sandbox: 3d4aa53ef6e7b05ca27449be9bd12f9aa67e1717b78302102b68eb21fa46591f" id=5f619b94-2b7c-485c-a6d0-0245f170c321 name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.091486669Z" level=info msg="Removed pod sandbox: 3d4aa53ef6e7b05ca27449be9bd12f9aa67e1717b78302102b68eb21fa46591f" id=5f619b94-2b7c-485c-a6d0-0245f170c321 name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.091877137Z" level=info msg="Stopping pod sandbox: bf1ce46b3022ecf705faac9a7ceaf8d504d3e828006e27fcecabfb2e4259c262" id=b0a019fb-c715-43b6-9cc4-e4ba5062834d name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.091910435Z" level=info msg="Stopped pod sandbox (already stopped): bf1ce46b3022ecf705faac9a7ceaf8d504d3e828006e27fcecabfb2e4259c262" id=b0a019fb-c715-43b6-9cc4-e4ba5062834d name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.092164177Z" level=info msg="Removing pod sandbox: bf1ce46b3022ecf705faac9a7ceaf8d504d3e828006e27fcecabfb2e4259c262" id=c663b2ec-799f-4495-9627-de3abafceda5 name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.098929261Z" level=info msg="Removed pod sandbox: bf1ce46b3022ecf705faac9a7ceaf8d504d3e828006e27fcecabfb2e4259c262" id=c663b2ec-799f-4495-9627-de3abafceda5 name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.099537025Z" level=info msg="Stopping pod sandbox: 6ff746bcb1566973e1a100b2a11b4d806156faaa2b804c80dd8e440e73a181f0" id=e2391eda-278b-4ab6-90d2-59a9934bcfc2 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.099598304Z" level=info msg="Stopped pod sandbox (already stopped): 6ff746bcb1566973e1a100b2a11b4d806156faaa2b804c80dd8e440e73a181f0" id=e2391eda-278b-4ab6-90d2-59a9934bcfc2 name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.099932260Z" level=info msg="Removing pod sandbox: 6ff746bcb1566973e1a100b2a11b4d806156faaa2b804c80dd8e440e73a181f0" id=8e06f410-3c32-4b51-ae40-6681afeaad4d name=/runtime.v1.RuntimeService/RemovePodSandbox
Aug 12 18:02:30 addons-808918 crio[1029]: time="2024-08-12 18:02:30.106460613Z" level=info msg="Removed pod sandbox: 6ff746bcb1566973e1a100b2a11b4d806156faaa2b804c80dd8e440e73a181f0" id=8e06f410-3c32-4b51-ae40-6681afeaad4d name=/runtime.v1.RuntimeService/RemovePodSandbox
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
ab51791cbf777 docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 8 seconds ago Running hello-world-app 0 aa975950414e2 hello-world-app-6778b5fc9f-9trgx
46f1dbb737a5d docker.io/library/nginx@sha256:208b70eefac13ee9be00e486f79c695b15cef861c680527171a27d253d834be9 2 minutes ago Running nginx 0 5208b3d84df68 nginx
86ca61a579412 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 3 minutes ago Running busybox 0 2d92c70a60f65 busybox
da7100d440f13 registry.k8s.io/metrics-server/metrics-server@sha256:31f034feb3f16062e93be7c40efc596553c89de172e2e412e588f02382388872 4 minutes ago Running metrics-server 0 b7554934ab187 metrics-server-c59844bb4-5vl77
d969a16ee6c44 cbb01a7bd410dc08ba382018ab909a674fb0e48687f0c00797ed5bc34fcc6bb4 5 minutes ago Running coredns 0 968d70ba994b8 coredns-7db6d8ff4d-k9fzz
f81aec1fdd68d 6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562 5 minutes ago Running storage-provisioner 0 544b13eb4226f storage-provisioner
975ffd4ef95f5 docker.io/kindest/kindnetd@sha256:4067b91686869e19bac601aec305ba55d2e74cdcb91347869bfb4fd3a26cd3c3 5 minutes ago Running kindnet-cni 0 b9a57a8664c2e kindnet-ssl56
f630daeff0617 55bb025d2cfa592b9381d01e122e72a1ed4b29ca32f86b7d289d99da794784d1 5 minutes ago Running kube-proxy 0 8c066e9b9e1af kube-proxy-gq98h
f2a2a0b2bb6b7 76932a3b37d7eb138c8f47c9a2b4218f0466dd273badf856f2ce2f0277e15b5e 6 minutes ago Running kube-controller-manager 0 5c9bb469c5392 kube-controller-manager-addons-808918
b21e7ec861b31 1f6d574d502f3b61c851b1bbd4ef2a964ce4c70071dd8da556f2d490d36b095d 6 minutes ago Running kube-apiserver 0 0032c7dd24f66 kube-apiserver-addons-808918
5eb00253940e3 3edc18e7b76722eb2eb37a0858c09caacbd422d6e0cae4c2e5ce67bc9a9795e2 6 minutes ago Running kube-scheduler 0 00c53ee25b557 kube-scheduler-addons-808918
a6e508d44055a 3861cfcd7c04ccac1f062788eca39487248527ef0c0cfd477a83d7691a75a899 6 minutes ago Running etcd 0 b6199bf5992ad etcd-addons-808918
==> coredns [d969a16ee6c44afacf5a8112150b3bfc693e5cedce2b0fc0272085ed571b18ca] <==
[INFO] 10.244.0.3:58397 - 1135 "A IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000053323s
[INFO] 10.244.0.3:43126 - 47379 "AAAA IN registry.kube-system.svc.cluster.local.us-central1-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.003766495s
[INFO] 10.244.0.3:43126 - 58143 "A IN registry.kube-system.svc.cluster.local.us-central1-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.004344704s
[INFO] 10.244.0.3:48987 - 40777 "A IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.003650965s
[INFO] 10.244.0.3:48987 - 40522 "AAAA IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.005645767s
[INFO] 10.244.0.3:35128 - 12458 "A IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.005522241s
[INFO] 10.244.0.3:35128 - 45476 "AAAA IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.008769536s
[INFO] 10.244.0.3:58316 - 11007 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000065755s
[INFO] 10.244.0.3:58316 - 41978 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000111315s
[INFO] 10.244.0.21:45461 - 48715 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000273812s
[INFO] 10.244.0.21:50523 - 9282 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000347s
[INFO] 10.244.0.21:34244 - 2480 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000134761s
[INFO] 10.244.0.21:37314 - 56706 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000179741s
[INFO] 10.244.0.21:35511 - 3315 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.00010295s
[INFO] 10.244.0.21:41906 - 51774 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000101572s
[INFO] 10.244.0.21:53795 - 38634 "AAAA IN storage.googleapis.com.us-central1-a.c.k8s-minikube.internal. udp 89 false 1232" NXDOMAIN qr,rd,ra 78 0.006879793s
[INFO] 10.244.0.21:48589 - 44085 "A IN storage.googleapis.com.us-central1-a.c.k8s-minikube.internal. udp 89 false 1232" NXDOMAIN qr,rd,ra 78 0.008594153s
[INFO] 10.244.0.21:60055 - 59587 "AAAA IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.006320998s
[INFO] 10.244.0.21:32898 - 25719 "A IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.007523787s
[INFO] 10.244.0.21:32960 - 60775 "A IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.005392999s
[INFO] 10.244.0.21:52212 - 13209 "AAAA IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.01749185s
[INFO] 10.244.0.21:37955 - 23033 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 648 0.000947312s
[INFO] 10.244.0.21:42949 - 59112 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.001033076s
[INFO] 10.244.0.26:33613 - 2 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000215687s
[INFO] 10.244.0.26:52084 - 3 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000138349s
==> describe nodes <==
Name: addons-808918
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-808918
kubernetes.io/os=linux
minikube.k8s.io/commit=cef58b3fe6f92de527946427fe0cca1d2a7a15cc
minikube.k8s.io/name=addons-808918
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_08_12T17_56_30_0700
minikube.k8s.io/version=v1.33.1
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-808918
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 12 Aug 2024 17:56:27 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-808918
AcquireTime: <unset>
RenewTime: Mon, 12 Aug 2024 18:02:28 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 12 Aug 2024 18:00:35 +0000 Mon, 12 Aug 2024 17:56:26 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 12 Aug 2024 18:00:35 +0000 Mon, 12 Aug 2024 17:56:26 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 12 Aug 2024 18:00:35 +0000 Mon, 12 Aug 2024 17:56:26 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 12 Aug 2024 18:00:35 +0000 Mon, 12 Aug 2024 17:57:02 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-808918
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
System Info:
Machine ID: 0ae5f64a6c8d48f48dd120b513e382c6
System UUID: e3a1fc3f-6648-44f9-a73f-032d6cdf188e
Boot ID: 0dc6aaf6-9184-4868-a91f-f2a41e60c5d5
Kernel Version: 5.15.0-1066-gcp
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: cri-o://1.24.6
Kubelet Version: v1.30.3
Kube-Proxy Version: v1.30.3
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (12 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 3m36s
default hello-world-app-6778b5fc9f-9trgx 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 10s
default nginx 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m30s
kube-system coredns-7db6d8ff4d-k9fzz 100m (1%!)(MISSING) 0 (0%!)(MISSING) 70Mi (0%!)(MISSING) 170Mi (0%!)(MISSING) 5m46s
kube-system etcd-addons-808918 100m (1%!)(MISSING) 0 (0%!)(MISSING) 100Mi (0%!)(MISSING) 0 (0%!)(MISSING) 6m1s
kube-system kindnet-ssl56 100m (1%!)(MISSING) 100m (1%!)(MISSING) 50Mi (0%!)(MISSING) 50Mi (0%!)(MISSING) 5m47s
kube-system kube-apiserver-addons-808918 250m (3%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 6m1s
kube-system kube-controller-manager-addons-808918 200m (2%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 6m1s
kube-system kube-proxy-gq98h 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m47s
kube-system kube-scheduler-addons-808918 100m (1%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 6m1s
kube-system metrics-server-c59844bb4-5vl77 100m (1%!)(MISSING) 0 (0%!)(MISSING) 200Mi (0%!)(MISSING) 0 (0%!)(MISSING) 5m42s
kube-system storage-provisioner 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m42s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (11%!)(MISSING) 100m (1%!)(MISSING)
memory 420Mi (1%!)(MISSING) 220Mi (0%!)(MISSING)
ephemeral-storage 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-1Gi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-2Mi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 5m42s kube-proxy
Normal Starting 6m6s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 6m6s (x8 over 6m6s) kubelet Node addons-808918 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 6m6s (x8 over 6m6s) kubelet Node addons-808918 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 6m6s (x8 over 6m6s) kubelet Node addons-808918 status is now: NodeHasSufficientPID
Normal Starting 6m1s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 6m1s kubelet Node addons-808918 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 6m1s kubelet Node addons-808918 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 6m1s kubelet Node addons-808918 status is now: NodeHasSufficientPID
Normal RegisteredNode 5m47s node-controller Node addons-808918 event: Registered Node addons-808918 in Controller
Normal NodeReady 5m28s kubelet Node addons-808918 status is now: NodeReady
==> dmesg <==
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 0a f2 0c 99 eb 6e 08 06
[ +11.387308] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 82 ad d8 1b c3 a2 08 06
[ +0.000538] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 12 1b c8 38 29 be 08 06
[ +13.967950] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff e2 d4 31 c5 ba 5a 08 06
[Aug12 17:27] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 82 47 c4 64 c6 8a 08 06
[ +32.481841] IPv4: martian source 10.244.0.3 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff e2 d4 31 c5 ba 5a 08 06
[Aug12 18:00] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000006] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
[ +1.031768] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000008] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
[ +2.015868] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000008] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
[ +4.031699] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000008] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
[ +8.191397] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000007] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
[ +16.126819] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000022] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
[Aug12 18:01] IPv4: martian source 10.244.0.20 from 127.0.0.1, on dev eth0
[ +0.000006] ll header: 00000000: c6 fb 93 df 05 1f ee dd 07 40 fc ce 08 00
==> etcd [a6e508d44055a56fcac43a3530f084186f88151812cea99189346bbd57cbba1b] <==
{"level":"warn","ts":"2024-08-12T17:56:46.564884Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-08-12T17:56:46.264375Z","time spent":"300.428314ms","remote":"127.0.0.1:44708","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":721,"response count":0,"response size":40,"request content":"compare:<target:MOD key:\"/registry/events/kube-system/coredns-7db6d8ff4d-xgb5b.17eb0cd876fc0a56\" mod_revision:0 > success:<request_put:<key:\"/registry/events/kube-system/coredns-7db6d8ff4d-xgb5b.17eb0cd876fc0a56\" value_size:633 lease:8128031164778798863 >> failure:<>"}
{"level":"info","ts":"2024-08-12T17:56:46.555946Z","caller":"traceutil/trace.go:171","msg":"trace[1320227445] transaction","detail":"{read_only:false; response_revision:415; number_of_response:1; }","duration":"291.412593ms","start":"2024-08-12T17:56:46.264525Z","end":"2024-08-12T17:56:46.555938Z","steps":["trace[1320227445] 'process raft request' (duration: 290.857331ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:46.556043Z","caller":"traceutil/trace.go:171","msg":"trace[845739360] linearizableReadLoop","detail":"{readStateIndex:427; appliedIndex:425; }","duration":"108.434501ms","start":"2024-08-12T17:56:46.4476Z","end":"2024-08-12T17:56:46.556034Z","steps":["trace[845739360] 'read index received' (duration: 8.364346ms)","trace[845739360] 'applied index is now lower than readState.Index' (duration: 100.069515ms)"],"step_count":2}
{"level":"warn","ts":"2024-08-12T17:56:46.556106Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"108.490824ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/deployments/kube-system/coredns\" ","response":"range_response_count:1 size:4096"}
{"level":"warn","ts":"2024-08-12T17:56:46.566431Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-08-12T17:56:46.264519Z","time spent":"301.858286ms","remote":"127.0.0.1:44824","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":229,"response count":0,"response size":40,"request content":"compare:<target:MOD key:\"/registry/serviceaccounts/kube-system/endpointslicemirroring-controller\" mod_revision:321 > success:<request_put:<key:\"/registry/serviceaccounts/kube-system/endpointslicemirroring-controller\" value_size:150 >> failure:<request_range:<key:\"/registry/serviceaccounts/kube-system/endpointslicemirroring-controller\" > >"}
{"level":"warn","ts":"2024-08-12T17:56:46.566632Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"115.612654ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/minikube-ingress-dns\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-08-12T17:56:46.567075Z","caller":"traceutil/trace.go:171","msg":"trace[1832162950] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/minikube-ingress-dns; range_end:; response_count:0; response_revision:417; }","duration":"116.080466ms","start":"2024-08-12T17:56:46.450981Z","end":"2024-08-12T17:56:46.567062Z","steps":["trace[1832162950] 'agreement among raft nodes before linearized reading' (duration: 115.611815ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:46.566757Z","caller":"traceutil/trace.go:171","msg":"trace[767760448] range","detail":"{range_begin:/registry/deployments/kube-system/coredns; range_end:; response_count:1; response_revision:417; }","duration":"119.167147ms","start":"2024-08-12T17:56:46.447564Z","end":"2024-08-12T17:56:46.566731Z","steps":["trace[767760448] 'agreement among raft nodes before linearized reading' (duration: 108.494633ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:46.747603Z","caller":"traceutil/trace.go:171","msg":"trace[778766323] transaction","detail":"{read_only:false; response_revision:418; number_of_response:1; }","duration":"180.847836ms","start":"2024-08-12T17:56:46.566739Z","end":"2024-08-12T17:56:46.747587Z","steps":["trace[778766323] 'process raft request' (duration: 180.767585ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:46.761012Z","caller":"traceutil/trace.go:171","msg":"trace[184342505] transaction","detail":"{read_only:false; response_revision:419; number_of_response:1; }","duration":"101.186405ms","start":"2024-08-12T17:56:46.659811Z","end":"2024-08-12T17:56:46.760997Z","steps":["trace[184342505] 'process raft request' (duration: 100.988549ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:46.761287Z","caller":"traceutil/trace.go:171","msg":"trace[1083676019] transaction","detail":"{read_only:false; response_revision:420; number_of_response:1; }","duration":"101.234676ms","start":"2024-08-12T17:56:46.660039Z","end":"2024-08-12T17:56:46.761274Z","steps":["trace[1083676019] 'process raft request' (duration: 100.849051ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:46.862949Z","caller":"traceutil/trace.go:171","msg":"trace[320645314] transaction","detail":"{read_only:false; response_revision:422; number_of_response:1; }","duration":"115.602299ms","start":"2024-08-12T17:56:46.747319Z","end":"2024-08-12T17:56:46.862921Z","steps":["trace[320645314] 'process raft request' (duration: 104.349165ms)","trace[320645314] 'compare' (duration: 11.096105ms)"],"step_count":2}
{"level":"info","ts":"2024-08-12T17:56:46.863089Z","caller":"traceutil/trace.go:171","msg":"trace[1575666765] linearizableReadLoop","detail":"{readStateIndex:434; appliedIndex:433; }","duration":"102.094089ms","start":"2024-08-12T17:56:46.760977Z","end":"2024-08-12T17:56:46.863071Z","steps":["trace[1575666765] 'read index received' (duration: 90.642087ms)","trace[1575666765] 'applied index is now lower than readState.Index' (duration: 11.450684ms)"],"step_count":2}
{"level":"warn","ts":"2024-08-12T17:56:46.863302Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"109.907603ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:351"}
{"level":"info","ts":"2024-08-12T17:56:46.863345Z","caller":"traceutil/trace.go:171","msg":"trace[38192066] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:422; }","duration":"109.986005ms","start":"2024-08-12T17:56:46.75335Z","end":"2024-08-12T17:56:46.863336Z","steps":["trace[38192066] 'agreement among raft nodes before linearized reading' (duration: 109.898426ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-12T17:56:46.863831Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"108.085912ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/storage-provisioner\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-08-12T17:56:46.863873Z","caller":"traceutil/trace.go:171","msg":"trace[1001039302] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/storage-provisioner; range_end:; response_count:0; response_revision:423; }","duration":"108.152262ms","start":"2024-08-12T17:56:46.75571Z","end":"2024-08-12T17:56:46.863863Z","steps":["trace[1001039302] 'agreement among raft nodes before linearized reading' (duration: 107.462756ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:56:47.250499Z","caller":"traceutil/trace.go:171","msg":"trace[771517155] transaction","detail":"{read_only:false; response_revision:425; number_of_response:1; }","duration":"101.490743ms","start":"2024-08-12T17:56:47.148991Z","end":"2024-08-12T17:56:47.250482Z","steps":["trace[771517155] 'process raft request' (duration: 101.132536ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-12T17:56:47.753178Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"100.737635ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/kube-system\" ","response":"range_response_count:1 size:351"}
{"level":"info","ts":"2024-08-12T17:56:47.75516Z","caller":"traceutil/trace.go:171","msg":"trace[442082213] range","detail":"{range_begin:/registry/namespaces/kube-system; range_end:; response_count:1; response_revision:452; }","duration":"102.750987ms","start":"2024-08-12T17:56:47.652392Z","end":"2024-08-12T17:56:47.755143Z","steps":["trace[442082213] 'range keys from in-memory index tree' (duration: 83.601044ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-12T17:56:48.651114Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"182.007212ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/deployments/kube-system/metrics-server\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-08-12T17:56:48.651313Z","caller":"traceutil/trace.go:171","msg":"trace[1745891030] range","detail":"{range_begin:/registry/deployments/kube-system/metrics-server; range_end:; response_count:0; response_revision:517; }","duration":"182.220516ms","start":"2024-08-12T17:56:48.469062Z","end":"2024-08-12T17:56:48.651282Z","steps":["trace[1745891030] 'agreement among raft nodes before linearized reading' (duration: 181.947141ms)"],"step_count":1}
{"level":"info","ts":"2024-08-12T17:57:47.146756Z","caller":"traceutil/trace.go:171","msg":"trace[414200849] transaction","detail":"{read_only:false; response_revision:1174; number_of_response:1; }","duration":"176.758571ms","start":"2024-08-12T17:57:46.969962Z","end":"2024-08-12T17:57:47.146721Z","steps":["trace[414200849] 'process raft request' (duration: 176.448636ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-12T17:57:47.14683Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"100.247843ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods/kube-system/\" range_end:\"/registry/pods/kube-system0\" ","response":"range_response_count:19 size:95046"}
{"level":"info","ts":"2024-08-12T17:57:47.147142Z","caller":"traceutil/trace.go:171","msg":"trace[1936942855] range","detail":"{range_begin:/registry/pods/kube-system/; range_end:/registry/pods/kube-system0; response_count:19; response_revision:1174; }","duration":"100.600116ms","start":"2024-08-12T17:57:47.046524Z","end":"2024-08-12T17:57:47.147124Z","steps":["trace[1936942855] 'agreement among raft nodes before linearized reading' (duration: 100.067902ms)"],"step_count":1}
==> kernel <==
18:02:30 up 1:44, 0 users, load average: 0.38, 0.87, 0.74
Linux addons-808918 5.15.0-1066-gcp #74~20.04.1-Ubuntu SMP Fri Jul 26 09:28:41 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kindnet [975ffd4ef95f567e3d06db5ce1b5c1cc8b25e5ecc0a4d81f24f0044057aafe33] <==
I0812 18:01:12.647132 1 main.go:299] handling current node
I0812 18:01:22.646934 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:01:22.646973 1 main.go:299] handling current node
W0812 18:01:27.755995 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
E0812 18:01:27.756043 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.NetworkPolicy: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
I0812 18:01:32.646735 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:01:32.646772 1 main.go:299] handling current node
W0812 18:01:37.535586 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
E0812 18:01:37.535618 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
I0812 18:01:42.647443 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:01:42.647480 1 main.go:299] handling current node
I0812 18:01:52.647131 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:01:52.647172 1 main.go:299] handling current node
I0812 18:02:02.646775 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:02:02.646832 1 main.go:299] handling current node
W0812 18:02:07.551167 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "pods" in API group "" at the cluster scope
E0812 18:02:07.551212 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "pods" in API group "" at the cluster scope
I0812 18:02:12.647170 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:02:12.647210 1 main.go:299] handling current node
W0812 18:02:21.158462 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
E0812 18:02:21.158492 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
I0812 18:02:22.647223 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0812 18:02:22.647256 1 main.go:299] handling current node
W0812 18:02:23.448450 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
E0812 18:02:23.448487 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.NetworkPolicy: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
==> kube-apiserver [b21e7ec861b31b114311f638f7f92785f0c7c7a2eb803886a0a3c4fb3b490742] <==
E0812 17:59:20.487408 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
E0812 17:59:20.492967 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
E0812 17:59:20.498466 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
I0812 17:59:34.106431 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
E0812 17:59:35.499980 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
E0812 17:59:46.933327 1 upgradeaware.go:427] Error proxying data from client to backend: read tcp 192.168.49.2:8443->10.244.0.28:52114: read: connection reset by peer
I0812 17:59:54.698852 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0812 17:59:55.713461 1 cacher.go:168] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0812 18:00:00.178503 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0812 18:00:00.454648 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.106.13.227"}
I0812 18:00:07.378050 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0812 18:00:07.378102 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0812 18:00:07.389997 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0812 18:00:07.390172 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0812 18:00:07.390397 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0812 18:00:07.390426 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0812 18:00:07.402746 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0812 18:00:07.402796 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0812 18:00:07.446740 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0812 18:00:07.446860 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0812 18:00:08.278905 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.110.77.103"}
W0812 18:00:08.390915 1 cacher.go:168] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0812 18:00:08.447433 1 cacher.go:168] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0812 18:00:08.455329 1 cacher.go:168] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0812 18:02:20.451919 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.110.252.114"}
==> kube-controller-manager [f2a2a0b2bb6b732277d5594073a67536a8140a95757eada209ef572d9bd752d0] <==
W0812 18:01:14.171052 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:01:14.171087 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 18:01:22.669595 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:01:22.669632 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 18:01:33.868585 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:01:33.868620 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 18:01:44.758342 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:01:44.758386 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 18:01:54.049147 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:01:54.049186 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 18:01:57.933534 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:01:57.933571 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0812 18:02:12.095978 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:02:12.096023 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0812 18:02:20.266224 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-6778b5fc9f" duration="14.521373ms"
I0812 18:02:20.272666 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-6778b5fc9f" duration="6.324175ms"
I0812 18:02:20.272750 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-6778b5fc9f" duration="43.385µs"
I0812 18:02:20.278225 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-6778b5fc9f" duration="78.599µs"
I0812 18:02:22.655959 1 job_controller.go:566] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create"
I0812 18:02:22.657481 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-6d9bd977d4" duration="5.294µs"
I0812 18:02:22.659521 1 job_controller.go:566] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch"
I0812 18:02:23.149060 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-6778b5fc9f" duration="5.324006ms"
I0812 18:02:23.149171 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-6778b5fc9f" duration="52.572µs"
W0812 18:02:23.974122 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0812 18:02:23.974157 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
==> kube-proxy [f630daeff0617517e30ecb4fa8df7e1b893f85bce15ef1de4d21eff4ae5fbcc5] <==
I0812 17:56:46.968745 1 server_linux.go:69] "Using iptables proxy"
I0812 17:56:47.372939 1 server.go:1062] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
I0812 17:56:48.546502 1 server.go:659] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0812 17:56:48.546596 1 server_linux.go:165] "Using iptables Proxier"
I0812 17:56:48.656032 1 server_linux.go:511] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I0812 17:56:48.664681 1 server_linux.go:528] "Defaulting to no-op detect-local"
I0812 17:56:48.668408 1 proxier.go:243] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0812 17:56:48.668764 1 server.go:872] "Version info" version="v1.30.3"
I0812 17:56:48.669185 1 server.go:874] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0812 17:56:48.670546 1 config.go:192] "Starting service config controller"
I0812 17:56:48.670567 1 shared_informer.go:313] Waiting for caches to sync for service config
I0812 17:56:48.670600 1 config.go:101] "Starting endpoint slice config controller"
I0812 17:56:48.670604 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0812 17:56:48.671070 1 config.go:319] "Starting node config controller"
I0812 17:56:48.671077 1 shared_informer.go:313] Waiting for caches to sync for node config
I0812 17:56:48.846505 1 shared_informer.go:320] Caches are synced for node config
I0812 17:56:48.846665 1 shared_informer.go:320] Caches are synced for service config
I0812 17:56:48.846748 1 shared_informer.go:320] Caches are synced for endpoint slice config
==> kube-scheduler [5eb00253940e3eeeae339c8a8234039a48b3efa17916267fccc2abcb00191cf4] <==
W0812 17:56:27.271352 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0812 17:56:27.346852 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W0812 17:56:27.348175 1 reflector.go:547] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0812 17:56:27.348988 1 reflector.go:150] runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W0812 17:56:28.077731 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0812 17:56:28.077765 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W0812 17:56:28.084713 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0812 17:56:28.084737 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0812 17:56:28.229212 1 reflector.go:547] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0812 17:56:28.229250 1 reflector.go:150] runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W0812 17:56:28.240200 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0812 17:56:28.240238 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W0812 17:56:28.243097 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0812 17:56:28.243131 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W0812 17:56:28.318824 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0812 17:56:28.318865 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W0812 17:56:28.355932 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0812 17:56:28.355975 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W0812 17:56:28.411257 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0812 17:56:28.411300 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0812 17:56:28.417161 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0812 17:56:28.417198 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0812 17:56:28.436217 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0812 17:56:28.436249 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
I0812 17:56:30.470056 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Aug 12 18:02:20 addons-808918 kubelet[1756]: I0812 18:02:20.273340 1756 reconciler_common.go:247] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnnnn\" (UniqueName: \"kubernetes.io/projected/fdd72da1-d04b-4530-9507-97f13c2fa449-kube-api-access-cnnnn\") pod \"hello-world-app-6778b5fc9f-9trgx\" (UID: \"fdd72da1-d04b-4530-9507-97f13c2fa449\") " pod="default/hello-world-app-6778b5fc9f-9trgx"
Aug 12 18:02:21 addons-808918 kubelet[1756]: I0812 18:02:21.380787 1756 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2lqb\" (UniqueName: \"kubernetes.io/projected/2ee227fa-4a7a-4608-973b-6fb4ed08f281-kube-api-access-l2lqb\") pod \"2ee227fa-4a7a-4608-973b-6fb4ed08f281\" (UID: \"2ee227fa-4a7a-4608-973b-6fb4ed08f281\") "
Aug 12 18:02:21 addons-808918 kubelet[1756]: I0812 18:02:21.382578 1756 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ee227fa-4a7a-4608-973b-6fb4ed08f281-kube-api-access-l2lqb" (OuterVolumeSpecName: "kube-api-access-l2lqb") pod "2ee227fa-4a7a-4608-973b-6fb4ed08f281" (UID: "2ee227fa-4a7a-4608-973b-6fb4ed08f281"). InnerVolumeSpecName "kube-api-access-l2lqb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Aug 12 18:02:21 addons-808918 kubelet[1756]: I0812 18:02:21.481892 1756 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-l2lqb\" (UniqueName: \"kubernetes.io/projected/2ee227fa-4a7a-4608-973b-6fb4ed08f281-kube-api-access-l2lqb\") on node \"addons-808918\" DevicePath \"\""
Aug 12 18:02:22 addons-808918 kubelet[1756]: I0812 18:02:22.130893 1756 scope.go:117] "RemoveContainer" containerID="5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f"
Aug 12 18:02:22 addons-808918 kubelet[1756]: I0812 18:02:22.148552 1756 scope.go:117] "RemoveContainer" containerID="5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f"
Aug 12 18:02:22 addons-808918 kubelet[1756]: E0812 18:02:22.149016 1756 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f\": container with ID starting with 5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f not found: ID does not exist" containerID="5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f"
Aug 12 18:02:22 addons-808918 kubelet[1756]: I0812 18:02:22.149059 1756 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f"} err="failed to get container status \"5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f\": rpc error: code = NotFound desc = could not find container \"5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f\": container with ID starting with 5e7fb22984fa64e3c1c3310f1ee7ba87b479bc23ed79460fdb66cd30bf73938f not found: ID does not exist"
Aug 12 18:02:23 addons-808918 kubelet[1756]: I0812 18:02:23.143944 1756 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/hello-world-app-6778b5fc9f-9trgx" podStartSLOduration=1.477160302 podStartE2EDuration="3.143918815s" podCreationTimestamp="2024-08-12 18:02:20 +0000 UTC" firstStartedPulling="2024-08-12 18:02:20.606495862 +0000 UTC m=+350.926282800" lastFinishedPulling="2024-08-12 18:02:22.273254361 +0000 UTC m=+352.593041313" observedRunningTime="2024-08-12 18:02:23.143555936 +0000 UTC m=+353.463342893" watchObservedRunningTime="2024-08-12 18:02:23.143918815 +0000 UTC m=+353.463705772"
Aug 12 18:02:23 addons-808918 kubelet[1756]: I0812 18:02:23.762601 1756 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e450fdc-8cab-47f8-9bd7-50072dee65e8" path="/var/lib/kubelet/pods/1e450fdc-8cab-47f8-9bd7-50072dee65e8/volumes"
Aug 12 18:02:23 addons-808918 kubelet[1756]: I0812 18:02:23.762957 1756 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ee227fa-4a7a-4608-973b-6fb4ed08f281" path="/var/lib/kubelet/pods/2ee227fa-4a7a-4608-973b-6fb4ed08f281/volumes"
Aug 12 18:02:23 addons-808918 kubelet[1756]: I0812 18:02:23.763266 1756 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0acfde0-f583-4fb4-ab7b-7b3f3f26fcd3" path="/var/lib/kubelet/pods/d0acfde0-f583-4fb4-ab7b-7b3f3f26fcd3/volumes"
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.010131 1756 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh5p9\" (UniqueName: \"kubernetes.io/projected/bb94187a-d528-48d4-9174-84f9ba45e9f0-kube-api-access-xh5p9\") pod \"bb94187a-d528-48d4-9174-84f9ba45e9f0\" (UID: \"bb94187a-d528-48d4-9174-84f9ba45e9f0\") "
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.010197 1756 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bb94187a-d528-48d4-9174-84f9ba45e9f0-webhook-cert\") pod \"bb94187a-d528-48d4-9174-84f9ba45e9f0\" (UID: \"bb94187a-d528-48d4-9174-84f9ba45e9f0\") "
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.012092 1756 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb94187a-d528-48d4-9174-84f9ba45e9f0-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "bb94187a-d528-48d4-9174-84f9ba45e9f0" (UID: "bb94187a-d528-48d4-9174-84f9ba45e9f0"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.012104 1756 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb94187a-d528-48d4-9174-84f9ba45e9f0-kube-api-access-xh5p9" (OuterVolumeSpecName: "kube-api-access-xh5p9") pod "bb94187a-d528-48d4-9174-84f9ba45e9f0" (UID: "bb94187a-d528-48d4-9174-84f9ba45e9f0"). InnerVolumeSpecName "kube-api-access-xh5p9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.110374 1756 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-xh5p9\" (UniqueName: \"kubernetes.io/projected/bb94187a-d528-48d4-9174-84f9ba45e9f0-kube-api-access-xh5p9\") on node \"addons-808918\" DevicePath \"\""
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.110411 1756 reconciler_common.go:289] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bb94187a-d528-48d4-9174-84f9ba45e9f0-webhook-cert\") on node \"addons-808918\" DevicePath \"\""
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.143531 1756 scope.go:117] "RemoveContainer" containerID="76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8"
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.157913 1756 scope.go:117] "RemoveContainer" containerID="76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8"
Aug 12 18:02:26 addons-808918 kubelet[1756]: E0812 18:02:26.158256 1756 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8\": container with ID starting with 76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8 not found: ID does not exist" containerID="76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8"
Aug 12 18:02:26 addons-808918 kubelet[1756]: I0812 18:02:26.158299 1756 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8"} err="failed to get container status \"76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8\": rpc error: code = NotFound desc = could not find container \"76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8\": container with ID starting with 76609dfb374cc0231166f71e6e4efe321ae0676a3658c7b2fb0b51dbcfec6bf8 not found: ID does not exist"
Aug 12 18:02:27 addons-808918 kubelet[1756]: I0812 18:02:27.761981 1756 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb94187a-d528-48d4-9174-84f9ba45e9f0" path="/var/lib/kubelet/pods/bb94187a-d528-48d4-9174-84f9ba45e9f0/volumes"
Aug 12 18:02:30 addons-808918 kubelet[1756]: I0812 18:02:30.043491 1756 scope.go:117] "RemoveContainer" containerID="592b81b132a6a35727d1282d0b6a97fdd003fa9fcef645145ce0585398584b7b"
Aug 12 18:02:30 addons-808918 kubelet[1756]: I0812 18:02:30.059168 1756 scope.go:117] "RemoveContainer" containerID="c0a00532c52c9dd30faa3ecd38a30bfe1496804e259de8333ad69068c4060d5a"
==> storage-provisioner [f81aec1fdd68dde23d06a8f781734c8885860834fabf2d8725f871029c4d23f0] <==
I0812 17:57:03.788975 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0812 17:57:03.796747 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0812 17:57:03.796801 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0812 17:57:03.852348 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0812 17:57:03.852559 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-808918_49269686-cc0a-4cfa-8052-67891057af6d!
I0812 17:57:03.853418 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"130f3c49-6f0d-4108-8434-81523e255d84", APIVersion:"v1", ResourceVersion:"943", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-808918_49269686-cc0a-4cfa-8052-67891057af6d became leader
I0812 17:57:03.953222 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-808918_49269686-cc0a-4cfa-8052-67891057af6d!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-808918 -n addons-808918
helpers_test.go:261: (dbg) Run: kubectl --context addons-808918 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (151.67s)