=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run: kubectl --context addons-877132 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run: kubectl --context addons-877132 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run: kubectl --context addons-877132 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [df4c372b-b171-4467-b9a5-23a7831fc55d] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [df4c372b-b171-4467-b9a5-23a7831fc55d] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 9.003435269s
addons_test.go:264: (dbg) Run: out/minikube-linux-amd64 -p addons-877132 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:264: (dbg) Non-zero exit: out/minikube-linux-amd64 -p addons-877132 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'": exit status 1 (2m9.872764362s)
** stderr **
ssh: Process exited with status 28
** /stderr **
addons_test.go:280: failed to get expected response from http://127.0.0.1/ within minikube: exit status 1
addons_test.go:288: (dbg) Run: kubectl --context addons-877132 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run: out/minikube-linux-amd64 -p addons-877132 ip
addons_test.go:299: (dbg) Run: nslookup hello-john.test 192.168.49.2
addons_test.go:308: (dbg) Run: out/minikube-linux-amd64 -p addons-877132 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:308: (dbg) Done: out/minikube-linux-amd64 -p addons-877132 addons disable ingress-dns --alsologtostderr -v=1: (1.301408695s)
addons_test.go:313: (dbg) Run: out/minikube-linux-amd64 -p addons-877132 addons disable ingress --alsologtostderr -v=1
addons_test.go:313: (dbg) Done: out/minikube-linux-amd64 -p addons-877132 addons disable ingress --alsologtostderr -v=1: (7.588332695s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-877132
helpers_test.go:235: (dbg) docker inspect addons-877132:
-- stdout --
[
{
"Id": "0a128850adc6c9739319d0ccdc3a9eea5e6209a1908ca45931643f617a920748",
"Created": "2024-08-15T00:05:47.313639387Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 34174,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-08-15T00:05:47.430605182Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:49d4702e5c94195d7796cb79f5fbc9d7cc584c1c41f3c58bf1694d1da009b2f6",
"ResolvConfPath": "/var/lib/docker/containers/0a128850adc6c9739319d0ccdc3a9eea5e6209a1908ca45931643f617a920748/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/0a128850adc6c9739319d0ccdc3a9eea5e6209a1908ca45931643f617a920748/hostname",
"HostsPath": "/var/lib/docker/containers/0a128850adc6c9739319d0ccdc3a9eea5e6209a1908ca45931643f617a920748/hosts",
"LogPath": "/var/lib/docker/containers/0a128850adc6c9739319d0ccdc3a9eea5e6209a1908ca45931643f617a920748/0a128850adc6c9739319d0ccdc3a9eea5e6209a1908ca45931643f617a920748-json.log",
"Name": "/addons-877132",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-877132:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-877132",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/076552733d609200f850ab223a0029186d490bef6b897443d3c21b9f8104b811-init/diff:/var/lib/docker/overlay2/0205a5511280a28ae3b2781b04e306ca3ba6d39df24866040bde00e4e577fc69/diff",
"MergedDir": "/var/lib/docker/overlay2/076552733d609200f850ab223a0029186d490bef6b897443d3c21b9f8104b811/merged",
"UpperDir": "/var/lib/docker/overlay2/076552733d609200f850ab223a0029186d490bef6b897443d3c21b9f8104b811/diff",
"WorkDir": "/var/lib/docker/overlay2/076552733d609200f850ab223a0029186d490bef6b897443d3c21b9f8104b811/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-877132",
"Source": "/var/lib/docker/volumes/addons-877132/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-877132",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-877132",
"name.minikube.sigs.k8s.io": "addons-877132",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "6a0fbe5e4a1988f743bcdf7dea1f27c6a575bb4991e0dc783f167f6a2c62a4ac",
"SandboxKey": "/var/run/docker/netns/6a0fbe5e4a19",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-877132": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "92741e9c6adef761a12cc5aa129b7ea5de95847ec3af60896db99bb0f8592a7c",
"EndpointID": "e03ef1cf5500ca2f0df1215461c824d1aaac3f152cbba89e7dd5d59184418014",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-877132",
"0a128850adc6"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-877132 -n addons-877132
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-877132 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-amd64 -p addons-877132 logs -n 25: (1.032065266s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | --download-only -p | download-docker-237330 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | |
| | download-docker-237330 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p download-docker-237330 | download-docker-237330 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | 15 Aug 24 00:05 UTC |
| start | --download-only -p | binary-mirror-616195 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | |
| | binary-mirror-616195 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:46729 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| delete | -p binary-mirror-616195 | binary-mirror-616195 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | 15 Aug 24 00:05 UTC |
| addons | disable dashboard -p | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | |
| | addons-877132 | | | | | |
| addons | enable dashboard -p | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | |
| | addons-877132 | | | | | |
| start | -p addons-877132 --wait=true | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:05 UTC | 15 Aug 24 00:08 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=crio | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | gcp-auth --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| ssh | addons-877132 ssh cat | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | /opt/local-path-provisioner/pvc-56d7ae18-0d09-496f-9576-9fd79c71aa37_default_test-pvc/file1 | | | | | |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | -p addons-877132 | | | | | |
| ip | addons-877132 ip | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | addons-877132 | | | | | |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | enable headlamp | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:08 UTC |
| | -p addons-877132 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:08 UTC | 15 Aug 24 00:09 UTC |
| | addons-877132 | | | | | |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:09 UTC | 15 Aug 24 00:09 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ssh | addons-877132 ssh curl -s | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:09 UTC | |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| addons | addons-877132 addons | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:09 UTC | 15 Aug 24 00:09 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-877132 addons | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:09 UTC | 15 Aug 24 00:09 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-877132 ip | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:11 UTC | 15 Aug 24 00:11 UTC |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:11 UTC | 15 Aug 24 00:11 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-877132 addons disable | addons-877132 | jenkins | v1.33.1 | 15 Aug 24 00:11 UTC | 15 Aug 24 00:11 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/08/15 00:05:23
Running on machine: ubuntu-20-agent-4
Binary: Built with gc go1.22.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0815 00:05:23.654201 33429 out.go:291] Setting OutFile to fd 1 ...
I0815 00:05:23.654618 33429 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0815 00:05:23.654663 33429 out.go:304] Setting ErrFile to fd 2...
I0815 00:05:23.654681 33429 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0815 00:05:23.655134 33429 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19443-25263/.minikube/bin
I0815 00:05:23.655982 33429 out.go:298] Setting JSON to false
I0815 00:05:23.656755 33429 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-4","uptime":6461,"bootTime":1723673863,"procs":171,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1066-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0815 00:05:23.656807 33429 start.go:139] virtualization: kvm guest
I0815 00:05:23.658523 33429 out.go:177] * [addons-877132] minikube v1.33.1 on Ubuntu 20.04 (kvm/amd64)
I0815 00:05:23.659971 33429 notify.go:220] Checking for updates...
I0815 00:05:23.659982 33429 out.go:177] - MINIKUBE_LOCATION=19443
I0815 00:05:23.661059 33429 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0815 00:05:23.662403 33429 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19443-25263/kubeconfig
I0815 00:05:23.663582 33429 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19443-25263/.minikube
I0815 00:05:23.664704 33429 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0815 00:05:23.665903 33429 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0815 00:05:23.667224 33429 driver.go:392] Setting default libvirt URI to qemu:///system
I0815 00:05:23.687835 33429 docker.go:123] docker version: linux-27.1.2:Docker Engine - Community
I0815 00:05:23.687962 33429 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0815 00:05:23.732664 33429 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-08-15 00:05:23.724426498 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1066-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86
_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-4 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErr
ors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0815 00:05:23.732801 33429 docker.go:307] overlay module found
I0815 00:05:23.734680 33429 out.go:177] * Using the docker driver based on user configuration
I0815 00:05:23.735854 33429 start.go:297] selected driver: docker
I0815 00:05:23.735875 33429 start.go:901] validating driver "docker" against <nil>
I0815 00:05:23.735889 33429 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0815 00:05:23.736663 33429 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0815 00:05:23.783497 33429 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-08-15 00:05:23.775412376 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1066-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86
_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-4 Labels:[] ExperimentalBuild:false ServerVersion:27.1.2 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:8fc6bcff51318944179630522a095cc9dbf9f353 Expected:8fc6bcff51318944179630522a095cc9dbf9f353} RuncCommit:{ID:v1.1.13-0-g58aa920 Expected:v1.1.13-0-g58aa920} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErr
ors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.1] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0815 00:05:23.783655 33429 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0815 00:05:23.783845 33429 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0815 00:05:23.785330 33429 out.go:177] * Using Docker driver with root privileges
I0815 00:05:23.786691 33429 cni.go:84] Creating CNI manager for ""
I0815 00:05:23.786706 33429 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0815 00:05:23.786715 33429 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I0815 00:05:23.786761 33429 start.go:340] cluster config:
{Name:addons-877132 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:addons-877132 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime
:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSH
AgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0815 00:05:23.787982 33429 out.go:177] * Starting "addons-877132" primary control-plane node in "addons-877132" cluster
I0815 00:05:23.789023 33429 cache.go:121] Beginning downloading kic base image for docker with crio
I0815 00:05:23.790242 33429 out.go:177] * Pulling base image v0.0.44-1723650208-19443 ...
I0815 00:05:23.791298 33429 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime crio
I0815 00:05:23.791325 33429 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19443-25263/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-cri-o-overlay-amd64.tar.lz4
I0815 00:05:23.791336 33429 cache.go:56] Caching tarball of preloaded images
I0815 00:05:23.791373 33429 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 in local docker daemon
I0815 00:05:23.791398 33429 preload.go:172] Found /home/jenkins/minikube-integration/19443-25263/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-cri-o-overlay-amd64.tar.lz4 in cache, skipping download
I0815 00:05:23.791407 33429 cache.go:59] Finished verifying existence of preloaded tar for v1.31.0 on crio
I0815 00:05:23.791714 33429 profile.go:143] Saving config to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/config.json ...
I0815 00:05:23.791738 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/config.json: {Name:mk5c91fbc1c1fde61b892ae0ae5591fd2dd76b2a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:23.805688 33429 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 to local cache
I0815 00:05:23.805810 33429 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 in local cache directory
I0815 00:05:23.805828 33429 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 in local cache directory, skipping pull
I0815 00:05:23.805832 33429 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 exists in cache, skipping pull
I0815 00:05:23.805840 33429 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 as a tarball
I0815 00:05:23.805847 33429 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 from local cache
I0815 00:05:35.207757 33429 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 from cached tarball
I0815 00:05:35.207800 33429 cache.go:194] Successfully downloaded all kic artifacts
I0815 00:05:35.207842 33429 start.go:360] acquireMachinesLock for addons-877132: {Name:mk87c4769b05652828bbd513a339608563304c52 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0815 00:05:35.207952 33429 start.go:364] duration metric: took 89.15µs to acquireMachinesLock for "addons-877132"
I0815 00:05:35.207977 33429 start.go:93] Provisioning new machine with config: &{Name:addons-877132 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:addons-877132 Namespace:default APIServerHAVIP: APIServerName:min
ikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQe
muFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:crio ControlPlane:true Worker:true}
I0815 00:05:35.208064 33429 start.go:125] createHost starting for "" (driver="docker")
I0815 00:05:35.209932 33429 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0815 00:05:35.210140 33429 start.go:159] libmachine.API.Create for "addons-877132" (driver="docker")
I0815 00:05:35.210169 33429 client.go:168] LocalClient.Create starting
I0815 00:05:35.210265 33429 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca.pem
I0815 00:05:35.403780 33429 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/cert.pem
I0815 00:05:35.581910 33429 cli_runner.go:164] Run: docker network inspect addons-877132 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0815 00:05:35.597259 33429 cli_runner.go:211] docker network inspect addons-877132 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0815 00:05:35.597337 33429 network_create.go:284] running [docker network inspect addons-877132] to gather additional debugging logs...
I0815 00:05:35.597356 33429 cli_runner.go:164] Run: docker network inspect addons-877132
W0815 00:05:35.612656 33429 cli_runner.go:211] docker network inspect addons-877132 returned with exit code 1
I0815 00:05:35.612683 33429 network_create.go:287] error running [docker network inspect addons-877132]: docker network inspect addons-877132: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-877132 not found
I0815 00:05:35.612694 33429 network_create.go:289] output of [docker network inspect addons-877132]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-877132 not found
** /stderr **
I0815 00:05:35.612781 33429 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0815 00:05:35.628068 33429 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc0000157c0}
I0815 00:05:35.628115 33429 network_create.go:124] attempt to create docker network addons-877132 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0815 00:05:35.628158 33429 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-877132 addons-877132
I0815 00:05:35.684711 33429 network_create.go:108] docker network addons-877132 192.168.49.0/24 created
I0815 00:05:35.684740 33429 kic.go:121] calculated static IP "192.168.49.2" for the "addons-877132" container
I0815 00:05:35.684801 33429 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0815 00:05:35.699815 33429 cli_runner.go:164] Run: docker volume create addons-877132 --label name.minikube.sigs.k8s.io=addons-877132 --label created_by.minikube.sigs.k8s.io=true
I0815 00:05:35.715691 33429 oci.go:103] Successfully created a docker volume addons-877132
I0815 00:05:35.715787 33429 cli_runner.go:164] Run: docker run --rm --name addons-877132-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-877132 --entrypoint /usr/bin/test -v addons-877132:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 -d /var/lib
I0815 00:05:42.917047 33429 cli_runner.go:217] Completed: docker run --rm --name addons-877132-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-877132 --entrypoint /usr/bin/test -v addons-877132:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 -d /var/lib: (7.201218931s)
I0815 00:05:42.917075 33429 oci.go:107] Successfully prepared a docker volume addons-877132
I0815 00:05:42.917090 33429 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime crio
I0815 00:05:42.917109 33429 kic.go:194] Starting extracting preloaded images to volume ...
I0815 00:05:42.917177 33429 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19443-25263/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-877132:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 -I lz4 -xf /preloaded.tar -C /extractDir
I0815 00:05:47.252511 33429 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19443-25263/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.0-cri-o-overlay-amd64.tar.lz4:/preloaded.tar:ro -v addons-877132:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 -I lz4 -xf /preloaded.tar -C /extractDir: (4.335289814s)
I0815 00:05:47.252538 33429 kic.go:203] duration metric: took 4.335426883s to extract preloaded images to volume ...
W0815 00:05:47.252667 33429 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0815 00:05:47.252767 33429 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0815 00:05:47.299562 33429 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-877132 --name addons-877132 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-877132 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-877132 --network addons-877132 --ip 192.168.49.2 --volume addons-877132:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002
I0815 00:05:47.614924 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Running}}
I0815 00:05:47.633132 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:05:47.650026 33429 cli_runner.go:164] Run: docker exec addons-877132 stat /var/lib/dpkg/alternatives/iptables
I0815 00:05:47.690704 33429 oci.go:144] the created container "addons-877132" has a running status.
I0815 00:05:47.690734 33429 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa...
I0815 00:05:47.887374 33429 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0815 00:05:47.912208 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:05:47.932744 33429 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0815 00:05:47.932762 33429 kic_runner.go:114] Args: [docker exec --privileged addons-877132 chown docker:docker /home/docker/.ssh/authorized_keys]
I0815 00:05:47.981634 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:05:47.999627 33429 machine.go:94] provisionDockerMachine start ...
I0815 00:05:47.999690 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:48.016577 33429 main.go:141] libmachine: Using SSH client type: native
I0815 00:05:48.016770 33429 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0815 00:05:48.016782 33429 main.go:141] libmachine: About to run SSH command:
hostname
I0815 00:05:48.232779 33429 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-877132
I0815 00:05:48.232815 33429 ubuntu.go:169] provisioning hostname "addons-877132"
I0815 00:05:48.232872 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:48.251859 33429 main.go:141] libmachine: Using SSH client type: native
I0815 00:05:48.252026 33429 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0815 00:05:48.252041 33429 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-877132 && echo "addons-877132" | sudo tee /etc/hostname
I0815 00:05:48.391228 33429 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-877132
I0815 00:05:48.391307 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:48.407474 33429 main.go:141] libmachine: Using SSH client type: native
I0815 00:05:48.407658 33429 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0815 00:05:48.407674 33429 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-877132' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-877132/g' /etc/hosts;
else
echo '127.0.1.1 addons-877132' | sudo tee -a /etc/hosts;
fi
fi
I0815 00:05:48.537347 33429 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0815 00:05:48.537372 33429 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19443-25263/.minikube CaCertPath:/home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19443-25263/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19443-25263/.minikube}
I0815 00:05:48.537409 33429 ubuntu.go:177] setting up certificates
I0815 00:05:48.537421 33429 provision.go:84] configureAuth start
I0815 00:05:48.537467 33429 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-877132
I0815 00:05:48.553566 33429 provision.go:143] copyHostCerts
I0815 00:05:48.553637 33429 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19443-25263/.minikube/key.pem (1675 bytes)
I0815 00:05:48.553746 33429 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19443-25263/.minikube/ca.pem (1078 bytes)
I0815 00:05:48.553868 33429 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19443-25263/.minikube/cert.pem (1123 bytes)
I0815 00:05:48.553930 33429 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19443-25263/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca-key.pem org=jenkins.addons-877132 san=[127.0.0.1 192.168.49.2 addons-877132 localhost minikube]
I0815 00:05:48.723505 33429 provision.go:177] copyRemoteCerts
I0815 00:05:48.723557 33429 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0815 00:05:48.723588 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:48.739526 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:05:48.837635 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0815 00:05:48.857192 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0815 00:05:48.876384 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0815 00:05:48.895738 33429 provision.go:87] duration metric: took 358.301506ms to configureAuth
I0815 00:05:48.895761 33429 ubuntu.go:193] setting minikube options for container-runtime
I0815 00:05:48.895946 33429 config.go:182] Loaded profile config "addons-877132": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.0
I0815 00:05:48.896036 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:48.911607 33429 main.go:141] libmachine: Using SSH client type: native
I0815 00:05:48.911755 33429 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x82f9c0] 0x832720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0815 00:05:48.911770 33429 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /etc/sysconfig && printf %!s(MISSING) "
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
" | sudo tee /etc/sysconfig/crio.minikube && sudo systemctl restart crio
I0815 00:05:49.120408 33429 main.go:141] libmachine: SSH cmd err, output: <nil>:
CRIO_MINIKUBE_OPTIONS='--insecure-registry 10.96.0.0/12 '
I0815 00:05:49.120437 33429 machine.go:97] duration metric: took 1.12079224s to provisionDockerMachine
I0815 00:05:49.120452 33429 client.go:171] duration metric: took 13.910275572s to LocalClient.Create
I0815 00:05:49.120476 33429 start.go:167] duration metric: took 13.910334619s to libmachine.API.Create "addons-877132"
I0815 00:05:49.120490 33429 start.go:293] postStartSetup for "addons-877132" (driver="docker")
I0815 00:05:49.120505 33429 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0815 00:05:49.120592 33429 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0815 00:05:49.120645 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:49.135907 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:05:49.229819 33429 ssh_runner.go:195] Run: cat /etc/os-release
I0815 00:05:49.232457 33429 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0815 00:05:49.232497 33429 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0815 00:05:49.232511 33429 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0815 00:05:49.232522 33429 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0815 00:05:49.232534 33429 filesync.go:126] Scanning /home/jenkins/minikube-integration/19443-25263/.minikube/addons for local assets ...
I0815 00:05:49.232593 33429 filesync.go:126] Scanning /home/jenkins/minikube-integration/19443-25263/.minikube/files for local assets ...
I0815 00:05:49.232614 33429 start.go:296] duration metric: took 112.117099ms for postStartSetup
I0815 00:05:49.232863 33429 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-877132
I0815 00:05:49.248484 33429 profile.go:143] Saving config to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/config.json ...
I0815 00:05:49.248733 33429 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0815 00:05:49.248790 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:49.263312 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:05:49.354018 33429 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0815 00:05:49.357822 33429 start.go:128] duration metric: took 14.149744159s to createHost
I0815 00:05:49.357843 33429 start.go:83] releasing machines lock for "addons-877132", held for 14.149879091s
I0815 00:05:49.357891 33429 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-877132
I0815 00:05:49.373827 33429 ssh_runner.go:195] Run: cat /version.json
I0815 00:05:49.373875 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:49.373874 33429 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0815 00:05:49.373952 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:05:49.388848 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:05:49.389550 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:05:49.544079 33429 ssh_runner.go:195] Run: systemctl --version
I0815 00:05:49.547823 33429 ssh_runner.go:195] Run: sudo sh -c "podman version >/dev/null"
I0815 00:05:49.682891 33429 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0815 00:05:49.686787 33429 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0815 00:05:49.702937 33429 cni.go:221] loopback cni configuration disabled: "/etc/cni/net.d/*loopback.conf*" found
I0815 00:05:49.703005 33429 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0815 00:05:49.726571 33429 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0815 00:05:49.726594 33429 start.go:495] detecting cgroup driver to use...
I0815 00:05:49.726621 33429 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0815 00:05:49.726658 33429 ssh_runner.go:195] Run: sudo systemctl stop -f containerd
I0815 00:05:49.739246 33429 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd
I0815 00:05:49.748243 33429 docker.go:217] disabling cri-docker service (if available) ...
I0815 00:05:49.748292 33429 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0815 00:05:49.759758 33429 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0815 00:05:49.771605 33429 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0815 00:05:49.845117 33429 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0815 00:05:49.920932 33429 docker.go:233] disabling docker service ...
I0815 00:05:49.920986 33429 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0815 00:05:49.936575 33429 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0815 00:05:49.945679 33429 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0815 00:05:50.020526 33429 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0815 00:05:50.097001 33429 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0815 00:05:50.106254 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///var/run/crio/crio.sock
" | sudo tee /etc/crictl.yaml"
I0815 00:05:50.119192 33429 crio.go:59] configure cri-o to use "registry.k8s.io/pause:3.10" pause image...
I0815 00:05:50.119247 33429 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*pause_image = .*$|pause_image = "registry.k8s.io/pause:3.10"|' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.126943 33429 crio.go:70] configuring cri-o to use "cgroupfs" as cgroup driver...
I0815 00:05:50.126988 33429 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|^.*cgroup_manager = .*$|cgroup_manager = "cgroupfs"|' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.134580 33429 ssh_runner.go:195] Run: sh -c "sudo sed -i '/conmon_cgroup = .*/d' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.142147 33429 ssh_runner.go:195] Run: sh -c "sudo sed -i '/cgroup_manager = .*/a conmon_cgroup = "pod"' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.149864 33429 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0815 00:05:50.156952 33429 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *"net.ipv4.ip_unprivileged_port_start=.*"/d' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.164563 33429 ssh_runner.go:195] Run: sh -c "sudo grep -q "^ *default_sysctls" /etc/crio/crio.conf.d/02-crio.conf || sudo sed -i '/conmon_cgroup = .*/a default_sysctls = \[\n\]' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.177100 33429 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^default_sysctls *= *\[|&\n "net.ipv4.ip_unprivileged_port_start=0",|' /etc/crio/crio.conf.d/02-crio.conf"
I0815 00:05:50.184728 33429 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0815 00:05:50.191170 33429 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0815 00:05:50.197628 33429 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0815 00:05:50.267275 33429 ssh_runner.go:195] Run: sudo systemctl restart crio
I0815 00:05:50.361312 33429 start.go:542] Will wait 60s for socket path /var/run/crio/crio.sock
I0815 00:05:50.361385 33429 ssh_runner.go:195] Run: stat /var/run/crio/crio.sock
I0815 00:05:50.364378 33429 start.go:563] Will wait 60s for crictl version
I0815 00:05:50.364426 33429 ssh_runner.go:195] Run: which crictl
I0815 00:05:50.367117 33429 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0815 00:05:50.397013 33429 start.go:579] Version: 0.1.0
RuntimeName: cri-o
RuntimeVersion: 1.24.6
RuntimeApiVersion: v1
I0815 00:05:50.397116 33429 ssh_runner.go:195] Run: crio --version
I0815 00:05:50.429244 33429 ssh_runner.go:195] Run: crio --version
I0815 00:05:50.461529 33429 out.go:177] * Preparing Kubernetes v1.31.0 on CRI-O 1.24.6 ...
I0815 00:05:50.462727 33429 cli_runner.go:164] Run: docker network inspect addons-877132 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0815 00:05:50.477480 33429 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0815 00:05:50.480493 33429 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0815 00:05:50.489550 33429 kubeadm.go:883] updating cluster {Name:addons-877132 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:addons-877132 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmw
arePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0815 00:05:50.489649 33429 preload.go:131] Checking if preload exists for k8s version v1.31.0 and runtime crio
I0815 00:05:50.489701 33429 ssh_runner.go:195] Run: sudo crictl images --output json
I0815 00:05:50.550221 33429 crio.go:514] all images are preloaded for cri-o runtime.
I0815 00:05:50.550242 33429 crio.go:433] Images already preloaded, skipping extraction
I0815 00:05:50.550279 33429 ssh_runner.go:195] Run: sudo crictl images --output json
I0815 00:05:50.579201 33429 crio.go:514] all images are preloaded for cri-o runtime.
I0815 00:05:50.579222 33429 cache_images.go:84] Images are preloaded, skipping loading
I0815 00:05:50.579229 33429 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.0 crio true true} ...
I0815 00:05:50.579313 33429 kubeadm.go:946] kubelet [Unit]
Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.0/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroups-per-qos=false --config=/var/lib/kubelet/config.yaml --enforce-node-allocatable= --hostname-override=addons-877132 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.0 ClusterName:addons-877132 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0815 00:05:50.579367 33429 ssh_runner.go:195] Run: crio config
I0815 00:05:50.616570 33429 cni.go:84] Creating CNI manager for ""
I0815 00:05:50.616587 33429 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0815 00:05:50.616596 33429 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0815 00:05:50.616615 33429 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.0 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-877132 NodeName:addons-877132 DNSDomain:cluster.local CRISocket:/var/run/crio/crio.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuberne
tes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/crio/crio.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0815 00:05:50.616737 33429 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/crio/crio.sock
name: "addons-877132"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%!"(MISSING)
nodefs.inodesFree: "0%!"(MISSING)
imagefs.available: "0%!"(MISSING)
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0815 00:05:50.616787 33429 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.0
I0815 00:05:50.624272 33429 binaries.go:44] Found k8s binaries, skipping transfer
I0815 00:05:50.624316 33429 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0815 00:05:50.631299 33429 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (363 bytes)
I0815 00:05:50.645652 33429 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0815 00:05:50.660401 33429 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2151 bytes)
I0815 00:05:50.674927 33429 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0815 00:05:50.677624 33429 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0815 00:05:50.686437 33429 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0815 00:05:50.757391 33429 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0815 00:05:50.768422 33429 certs.go:68] Setting up /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132 for IP: 192.168.49.2
I0815 00:05:50.768442 33429 certs.go:194] generating shared ca certs ...
I0815 00:05:50.768461 33429 certs.go:226] acquiring lock for ca certs: {Name:mk309157fa54119ea004edf6a36596f33b512455 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:50.768591 33429 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19443-25263/.minikube/ca.key
I0815 00:05:51.184009 33429 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19443-25263/.minikube/ca.crt ...
I0815 00:05:51.184041 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/ca.crt: {Name:mk2281b087378b5171f6a3ababac7c23d91f7a2d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.184205 33429 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19443-25263/.minikube/ca.key ...
I0815 00:05:51.184215 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/ca.key: {Name:mk7f28e7104766f3bc3ab7a26fee1d70165eac48 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.184292 33429 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.key
I0815 00:05:51.306696 33429 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.crt ...
I0815 00:05:51.306724 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.crt: {Name:mk007ceaa696b48cf9b73125039c9ff11d73a36e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.306876 33429 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.key ...
I0815 00:05:51.306886 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.key: {Name:mk6d0aefb75ddffa612443a728f4dc6aa04f663c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.307002 33429 certs.go:256] generating profile certs ...
I0815 00:05:51.307058 33429 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/client.key
I0815 00:05:51.307071 33429 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/client.crt with IP's: []
I0815 00:05:51.500129 33429 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/client.crt ...
I0815 00:05:51.500154 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/client.crt: {Name:mk439bedf422c6d72db5acc435a7cea939a2f4f5 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.500292 33429 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/client.key ...
I0815 00:05:51.500301 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/client.key: {Name:mk3dc5113cd977cffed1c4766b6188c8c37f9ef1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.500364 33429 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.key.e7c27cbf
I0815 00:05:51.500381 33429 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.crt.e7c27cbf with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0815 00:05:51.609033 33429 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.crt.e7c27cbf ...
I0815 00:05:51.609058 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.crt.e7c27cbf: {Name:mk6703eb6edd26daf5046bd4ca2b634b9cafdd88 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.609196 33429 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.key.e7c27cbf ...
I0815 00:05:51.609208 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.key.e7c27cbf: {Name:mk478e8492cd5c7d56e515385c8a0a37e3aba211 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.609275 33429 certs.go:381] copying /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.crt.e7c27cbf -> /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.crt
I0815 00:05:51.609363 33429 certs.go:385] copying /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.key.e7c27cbf -> /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.key
I0815 00:05:51.609426 33429 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.key
I0815 00:05:51.609444 33429 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.crt with IP's: []
I0815 00:05:51.900454 33429 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.crt ...
I0815 00:05:51.900483 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.crt: {Name:mkc962b237253f5c62e68e3c76301d6fa0e4fa6c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.900657 33429 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.key ...
I0815 00:05:51.900668 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.key: {Name:mk276eb8609a41c9cf483090c2f7a4fd7e3e1b33 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:05:51.900838 33429 certs.go:484] found cert: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca-key.pem (1675 bytes)
I0815 00:05:51.900870 33429 certs.go:484] found cert: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/ca.pem (1078 bytes)
I0815 00:05:51.900893 33429 certs.go:484] found cert: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/cert.pem (1123 bytes)
I0815 00:05:51.900916 33429 certs.go:484] found cert: /home/jenkins/minikube-integration/19443-25263/.minikube/certs/key.pem (1675 bytes)
I0815 00:05:51.901483 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0815 00:05:51.921595 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0815 00:05:51.940717 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0815 00:05:51.960157 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0815 00:05:51.979624 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0815 00:05:51.998486 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0815 00:05:52.017320 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0815 00:05:52.037272 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/profiles/addons-877132/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0815 00:05:52.056417 33429 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19443-25263/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0815 00:05:52.076144 33429 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0815 00:05:52.090393 33429 ssh_runner.go:195] Run: openssl version
I0815 00:05:52.094916 33429 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0815 00:05:52.102405 33429 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0815 00:05:52.105121 33429 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Aug 15 00:05 /usr/share/ca-certificates/minikubeCA.pem
I0815 00:05:52.105164 33429 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0815 00:05:52.110939 33429 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0815 00:05:52.118348 33429 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0815 00:05:52.120909 33429 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0815 00:05:52.120944 33429 kubeadm.go:392] StartCluster: {Name:addons-877132 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1723650208-19443@sha256:2be48dc5c74cde3c1d15ac913a640f4a2331b48358b81777568fb487d2757002 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.0 ClusterName:addons-877132 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:crio CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:crio ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmware
Path: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0815 00:05:52.121035 33429 cri.go:54] listing CRI containers in root : {State:paused Name: Namespaces:[kube-system]}
I0815 00:05:52.121078 33429 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0815 00:05:52.150788 33429 cri.go:89] found id: ""
I0815 00:05:52.150851 33429 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0815 00:05:52.158002 33429 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0815 00:05:52.165020 33429 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0815 00:05:52.165057 33429 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0815 00:05:52.172493 33429 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0815 00:05:52.172506 33429 kubeadm.go:157] found existing configuration files:
I0815 00:05:52.172543 33429 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0815 00:05:52.179306 33429 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0815 00:05:52.179343 33429 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0815 00:05:52.186501 33429 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0815 00:05:52.193388 33429 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0815 00:05:52.193429 33429 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0815 00:05:52.200229 33429 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0815 00:05:52.207771 33429 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0815 00:05:52.207840 33429 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0815 00:05:52.214934 33429 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0815 00:05:52.222802 33429 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0815 00:05:52.222864 33429 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0815 00:05:52.229685 33429 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.0:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0815 00:05:52.260389 33429 kubeadm.go:310] W0815 00:05:52.259734 1303 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0815 00:05:52.260821 33429 kubeadm.go:310] W0815 00:05:52.260363 1303 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0815 00:05:52.276476 33429 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1066-gcp\n", err: exit status 1
I0815 00:05:52.324462 33429 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0815 00:06:00.767633 33429 kubeadm.go:310] [init] Using Kubernetes version: v1.31.0
I0815 00:06:00.767703 33429 kubeadm.go:310] [preflight] Running pre-flight checks
I0815 00:06:00.767862 33429 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0815 00:06:00.767927 33429 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1066-gcp
I0815 00:06:00.767962 33429 kubeadm.go:310] OS: Linux
I0815 00:06:00.768007 33429 kubeadm.go:310] CGROUPS_CPU: enabled
I0815 00:06:00.768077 33429 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0815 00:06:00.768149 33429 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0815 00:06:00.768219 33429 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0815 00:06:00.768289 33429 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0815 00:06:00.768359 33429 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0815 00:06:00.768410 33429 kubeadm.go:310] CGROUPS_PIDS: enabled
I0815 00:06:00.768473 33429 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0815 00:06:00.768532 33429 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0815 00:06:00.768655 33429 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0815 00:06:00.768793 33429 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0815 00:06:00.768925 33429 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0815 00:06:00.769001 33429 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0815 00:06:00.770536 33429 out.go:204] - Generating certificates and keys ...
I0815 00:06:00.770633 33429 kubeadm.go:310] [certs] Using existing ca certificate authority
I0815 00:06:00.770715 33429 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0815 00:06:00.770788 33429 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0815 00:06:00.770862 33429 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0815 00:06:00.770939 33429 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0815 00:06:00.771012 33429 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0815 00:06:00.771100 33429 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0815 00:06:00.771216 33429 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-877132 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0815 00:06:00.771279 33429 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0815 00:06:00.771436 33429 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-877132 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0815 00:06:00.771528 33429 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0815 00:06:00.771617 33429 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0815 00:06:00.771655 33429 kubeadm.go:310] [certs] Generating "sa" key and public key
I0815 00:06:00.771707 33429 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0815 00:06:00.771747 33429 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0815 00:06:00.771799 33429 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0815 00:06:00.771847 33429 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0815 00:06:00.771896 33429 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0815 00:06:00.771941 33429 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0815 00:06:00.772003 33429 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0815 00:06:00.772075 33429 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0815 00:06:00.773209 33429 out.go:204] - Booting up control plane ...
I0815 00:06:00.773295 33429 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0815 00:06:00.773364 33429 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0815 00:06:00.773424 33429 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0815 00:06:00.773510 33429 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0815 00:06:00.773602 33429 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0815 00:06:00.773645 33429 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0815 00:06:00.773767 33429 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0815 00:06:00.773912 33429 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0815 00:06:00.773971 33429 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.387534ms
I0815 00:06:00.774033 33429 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0815 00:06:00.774089 33429 kubeadm.go:310] [api-check] The API server is healthy after 4.001373443s
I0815 00:06:00.774175 33429 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0815 00:06:00.774282 33429 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0815 00:06:00.774335 33429 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0815 00:06:00.774487 33429 kubeadm.go:310] [mark-control-plane] Marking the node addons-877132 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0815 00:06:00.774541 33429 kubeadm.go:310] [bootstrap-token] Using token: 9cd728.sstuwlg203zlj5vt
I0815 00:06:00.775824 33429 out.go:204] - Configuring RBAC rules ...
I0815 00:06:00.775911 33429 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0815 00:06:00.775980 33429 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0815 00:06:00.776107 33429 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0815 00:06:00.776230 33429 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0815 00:06:00.776336 33429 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0815 00:06:00.776409 33429 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0815 00:06:00.776498 33429 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0815 00:06:00.776540 33429 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0815 00:06:00.776577 33429 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0815 00:06:00.776582 33429 kubeadm.go:310]
I0815 00:06:00.776628 33429 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0815 00:06:00.776633 33429 kubeadm.go:310]
I0815 00:06:00.776733 33429 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0815 00:06:00.776748 33429 kubeadm.go:310]
I0815 00:06:00.776790 33429 kubeadm.go:310] mkdir -p $HOME/.kube
I0815 00:06:00.776837 33429 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0815 00:06:00.776884 33429 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0815 00:06:00.776897 33429 kubeadm.go:310]
I0815 00:06:00.776948 33429 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0815 00:06:00.776954 33429 kubeadm.go:310]
I0815 00:06:00.777017 33429 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0815 00:06:00.777027 33429 kubeadm.go:310]
I0815 00:06:00.777098 33429 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0815 00:06:00.777208 33429 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0815 00:06:00.777297 33429 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0815 00:06:00.777306 33429 kubeadm.go:310]
I0815 00:06:00.777383 33429 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0815 00:06:00.777447 33429 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0815 00:06:00.777453 33429 kubeadm.go:310]
I0815 00:06:00.777520 33429 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 9cd728.sstuwlg203zlj5vt \
I0815 00:06:00.777619 33429 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:0aaee585d8cab38ae3fe05542b0fa84d163b2d1c3df394dbd390896caee3c485 \
I0815 00:06:00.777641 33429 kubeadm.go:310] --control-plane
I0815 00:06:00.777647 33429 kubeadm.go:310]
I0815 00:06:00.777711 33429 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0815 00:06:00.777716 33429 kubeadm.go:310]
I0815 00:06:00.777805 33429 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 9cd728.sstuwlg203zlj5vt \
I0815 00:06:00.777934 33429 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:0aaee585d8cab38ae3fe05542b0fa84d163b2d1c3df394dbd390896caee3c485
I0815 00:06:00.777944 33429 cni.go:84] Creating CNI manager for ""
I0815 00:06:00.777950 33429 cni.go:143] "docker" driver + "crio" runtime found, recommending kindnet
I0815 00:06:00.779348 33429 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0815 00:06:00.780465 33429 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0815 00:06:00.783950 33429 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.31.0/kubectl ...
I0815 00:06:00.783963 33429 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
I0815 00:06:00.799808 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0815 00:06:00.977777 33429 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0815 00:06:00.977867 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:00.977880 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-877132 minikube.k8s.io/updated_at=2024_08_15T00_06_00_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=a560a51f794134545edbbeb49e1ab4a0b1355168 minikube.k8s.io/name=addons-877132 minikube.k8s.io/primary=true
I0815 00:06:00.984880 33429 ops.go:34] apiserver oom_adj: -16
I0815 00:06:01.066466 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:01.567517 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:02.066972 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:02.567491 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:03.067064 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:03.566958 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:04.066976 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:04.567486 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:05.067005 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:05.567422 33429 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.0/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0815 00:06:05.627271 33429 kubeadm.go:1113] duration metric: took 4.649454362s to wait for elevateKubeSystemPrivileges
I0815 00:06:05.627300 33429 kubeadm.go:394] duration metric: took 13.506358206s to StartCluster
I0815 00:06:05.627317 33429 settings.go:142] acquiring lock: {Name:mk24702fc665a6ffc1bd2280cb721c81d58ddde1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:06:05.627422 33429 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19443-25263/kubeconfig
I0815 00:06:05.627782 33429 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19443-25263/kubeconfig: {Name:mk5a4aa2b57f058fc0dbb1196c79fd5fb38108bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0815 00:06:05.627943 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0815 00:06:05.627954 33429 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.0 ContainerRuntime:crio ControlPlane:true Worker:true}
I0815 00:06:05.628018 33429 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0815 00:06:05.628156 33429 config.go:182] Loaded profile config "addons-877132": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.0
I0815 00:06:05.628201 33429 addons.go:69] Setting cloud-spanner=true in profile "addons-877132"
I0815 00:06:05.628254 33429 addons.go:234] Setting addon cloud-spanner=true in "addons-877132"
I0815 00:06:05.628288 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628202 33429 addons.go:69] Setting volumesnapshots=true in profile "addons-877132"
I0815 00:06:05.628342 33429 addons.go:234] Setting addon volumesnapshots=true in "addons-877132"
I0815 00:06:05.628369 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628165 33429 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-877132"
I0815 00:06:05.628437 33429 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-877132"
I0815 00:06:05.628459 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628174 33429 addons.go:69] Setting registry=true in profile "addons-877132"
I0815 00:06:05.628560 33429 addons.go:234] Setting addon registry=true in "addons-877132"
I0815 00:06:05.628601 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628177 33429 addons.go:69] Setting metrics-server=true in profile "addons-877132"
I0815 00:06:05.628697 33429 addons.go:234] Setting addon metrics-server=true in "addons-877132"
I0815 00:06:05.628730 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628818 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628181 33429 addons.go:69] Setting storage-provisioner=true in profile "addons-877132"
I0815 00:06:05.628836 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628853 33429 addons.go:234] Setting addon storage-provisioner=true in "addons-877132"
I0815 00:06:05.628880 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628938 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.629027 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.629163 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.629295 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628176 33429 addons.go:69] Setting ingress-dns=true in profile "addons-877132"
I0815 00:06:05.629708 33429 addons.go:234] Setting addon ingress-dns=true in "addons-877132"
I0815 00:06:05.629750 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.630183 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.631193 33429 out.go:177] * Verifying Kubernetes components...
I0815 00:06:05.632576 33429 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0815 00:06:05.628189 33429 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-877132"
I0815 00:06:05.632713 33429 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-877132"
I0815 00:06:05.632998 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628188 33429 addons.go:69] Setting helm-tiller=true in profile "addons-877132"
I0815 00:06:05.633347 33429 addons.go:234] Setting addon helm-tiller=true in "addons-877132"
I0815 00:06:05.628192 33429 addons.go:69] Setting ingress=true in profile "addons-877132"
I0815 00:06:05.633495 33429 addons.go:234] Setting addon ingress=true in "addons-877132"
I0815 00:06:05.633553 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.633625 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.628183 33429 addons.go:69] Setting inspektor-gadget=true in profile "addons-877132"
I0815 00:06:05.634070 33429 addons.go:234] Setting addon inspektor-gadget=true in "addons-877132"
I0815 00:06:05.634105 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.634517 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628196 33429 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-877132"
I0815 00:06:05.636522 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.636547 33429 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-877132"
I0815 00:06:05.636607 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.636740 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628163 33429 addons.go:69] Setting yakd=true in profile "addons-877132"
I0815 00:06:05.637075 33429 addons.go:234] Setting addon yakd=true in "addons-877132"
I0815 00:06:05.628197 33429 addons.go:69] Setting gcp-auth=true in profile "addons-877132"
I0815 00:06:05.637104 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.637110 33429 mustload.go:65] Loading cluster: addons-877132
I0815 00:06:05.637330 33429 config.go:182] Loaded profile config "addons-877132": Driver=docker, ContainerRuntime=crio, KubernetesVersion=v1.31.0
I0815 00:06:05.637534 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.637642 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.628213 33429 addons.go:69] Setting default-storageclass=true in profile "addons-877132"
I0815 00:06:05.638167 33429 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-877132"
I0815 00:06:05.628218 33429 addons.go:69] Setting volcano=true in profile "addons-877132"
I0815 00:06:05.643982 33429 addons.go:234] Setting addon volcano=true in "addons-877132"
I0815 00:06:05.637044 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.646007 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.666042 33429 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0815 00:06:05.666184 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.667416 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.668094 33429 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0815 00:06:05.668112 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0815 00:06:05.668158 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.669804 33429 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0815 00:06:05.673019 33429 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0815 00:06:05.673079 33429 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0815 00:06:05.673166 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.679897 33429 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0815 00:06:05.679941 33429 out.go:177] - Using image docker.io/registry:2.8.3
I0815 00:06:05.681192 33429 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0815 00:06:05.681415 33429 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0815 00:06:05.681428 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0815 00:06:05.681478 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.682634 33429 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0815 00:06:05.682649 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0815 00:06:05.682697 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.682859 33429 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0815 00:06:05.684119 33429 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0815 00:06:05.684135 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0815 00:06:05.684175 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.693193 33429 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.1
I0815 00:06:05.693193 33429 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.22
I0815 00:06:05.694564 33429 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0815 00:06:05.694595 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0815 00:06:05.694652 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.696426 33429 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0815 00:06:05.697529 33429 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0815 00:06:05.699629 33429 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.1
I0815 00:06:05.700079 33429 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0815 00:06:05.700096 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0815 00:06:05.700247 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.701053 33429 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0815 00:06:05.701069 33429 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0815 00:06:05.701119 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.726356 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.727572 33429 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
W0815 00:06:05.729466 33429 out.go:239] ! Enabling 'volcano' returned an error: running callbacks: [volcano addon does not support crio]
I0815 00:06:05.734048 33429 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0815 00:06:05.734072 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0815 00:06:05.734131 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.739495 33429 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.31.0
I0815 00:06:05.740707 33429 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0815 00:06:05.740722 33429 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0815 00:06:05.740772 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.742643 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.746915 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.752420 33429 addons.go:234] Setting addon default-storageclass=true in "addons-877132"
I0815 00:06:05.752463 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.752930 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.756364 33429 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-877132"
I0815 00:06:05.756407 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:05.756866 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:05.764150 33429 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0815 00:06:05.769890 33429 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0815 00:06:05.769911 33429 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0815 00:06:05.769965 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.771126 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.771429 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.772693 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.774045 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.785888 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.791410 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.793006 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.801917 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.801923 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.803826 33429 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0815 00:06:05.805212 33429 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0815 00:06:05.806397 33429 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0815 00:06:05.807467 33429 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0815 00:06:05.808848 33429 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0815 00:06:05.810186 33429 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0815 00:06:05.810207 33429 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0815 00:06:05.810247 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.810340 33429 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0815 00:06:05.811519 33429 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0815 00:06:05.812764 33429 out.go:177] - Using image docker.io/busybox:stable
I0815 00:06:05.813852 33429 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0815 00:06:05.813940 33429 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0815 00:06:05.813956 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0815 00:06:05.813991 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.816128 33429 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0815 00:06:05.817177 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0815 00:06:05.817189 33429 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0815 00:06:05.817233 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:05.830418 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.830537 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:05.832478 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
W0815 00:06:05.861368 33429 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0815 00:06:05.861400 33429 retry.go:31] will retry after 244.442357ms: ssh: handshake failed: EOF
W0815 00:06:05.861473 33429 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0815 00:06:05.861481 33429 retry.go:31] will retry after 180.613371ms: ssh: handshake failed: EOF
I0815 00:06:05.878964 33429 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0815 00:06:05.879077 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0815 00:06:06.077440 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0815 00:06:06.170081 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0815 00:06:06.174192 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0815 00:06:06.178934 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0815 00:06:06.271046 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0815 00:06:06.278098 33429 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0815 00:06:06.278121 33429 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0815 00:06:06.356678 33429 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0815 00:06:06.356706 33429 ssh_runner.go:362] scp helm-tiller/helm-tiller-rbac.yaml --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0815 00:06:06.359353 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0815 00:06:06.455385 33429 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0815 00:06:06.455472 33429 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0815 00:06:06.474571 33429 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0815 00:06:06.474654 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0815 00:06:06.554397 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0815 00:06:06.566563 33429 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0815 00:06:06.566657 33429 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0815 00:06:06.656051 33429 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0815 00:06:06.656137 33429 ssh_runner.go:362] scp helm-tiller/helm-tiller-svc.yaml --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0815 00:06:06.656413 33429 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0815 00:06:06.656465 33429 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0815 00:06:06.673757 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0815 00:06:06.673805 33429 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0815 00:06:06.677017 33429 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0815 00:06:06.677039 33429 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0815 00:06:06.773033 33429 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0815 00:06:06.773062 33429 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0815 00:06:06.860223 33429 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0815 00:06:06.860300 33429 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0815 00:06:06.860566 33429 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0815 00:06:06.860609 33429 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0815 00:06:06.868145 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0815 00:06:06.960420 33429 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0815 00:06:06.960448 33429 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0815 00:06:07.055232 33429 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0815 00:06:07.055261 33429 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0815 00:06:07.058115 33429 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0815 00:06:07.058143 33429 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0815 00:06:07.154264 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0815 00:06:07.154294 33429 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0815 00:06:07.155257 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0815 00:06:07.155277 33429 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0815 00:06:07.374343 33429 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0815 00:06:07.374368 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0815 00:06:07.374783 33429 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0815 00:06:07.374806 33429 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0815 00:06:07.455705 33429 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0815 00:06:07.455728 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0815 00:06:07.456207 33429 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0815 00:06:07.456223 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0815 00:06:07.568863 33429 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0815 00:06:07.568893 33429 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0815 00:06:07.569574 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0815 00:06:07.569592 33429 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0815 00:06:07.575132 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0815 00:06:07.659030 33429 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.0/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.779918509s)
I0815 00:06:07.659192 33429 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0815 00:06:07.659130 33429 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.780140591s)
I0815 00:06:07.660310 33429 node_ready.go:35] waiting up to 6m0s for node "addons-877132" to be "Ready" ...
I0815 00:06:07.757557 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0815 00:06:07.770064 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0815 00:06:07.867105 33429 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0815 00:06:07.867183 33429 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0815 00:06:07.965723 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0815 00:06:07.965749 33429 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0815 00:06:08.058081 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0815 00:06:08.360078 33429 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0815 00:06:08.360151 33429 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0815 00:06:08.367634 33429 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0815 00:06:08.367702 33429 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0815 00:06:08.378974 33429 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-877132" context rescaled to 1 replicas
I0815 00:06:08.672860 33429 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0815 00:06:08.672881 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0815 00:06:08.676306 33429 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0815 00:06:08.676368 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0815 00:06:08.970098 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0815 00:06:08.970391 33429 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0815 00:06:08.970437 33429 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0815 00:06:09.362832 33429 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0815 00:06:09.362917 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0815 00:06:09.670303 33429 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0815 00:06:09.670329 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0815 00:06:09.675093 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:09.955187 33429 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0815 00:06:09.955258 33429 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0815 00:06:10.169704 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0815 00:06:10.455264 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (4.377776639s)
I0815 00:06:10.455433 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (4.285322408s)
I0815 00:06:10.455482 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (4.281226704s)
I0815 00:06:12.160045 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (5.981063357s)
I0815 00:06:12.160085 33429 addons.go:475] Verifying addon ingress=true in "addons-877132"
I0815 00:06:12.160118 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (5.889032263s)
I0815 00:06:12.160212 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (5.800835507s)
I0815 00:06:12.160264 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (5.605841296s)
I0815 00:06:12.160307 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (5.292133359s)
I0815 00:06:12.160370 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (4.585212618s)
I0815 00:06:12.160706 33429 addons.go:475] Verifying addon metrics-server=true in "addons-877132"
I0815 00:06:12.162800 33429 out.go:177] * Verifying ingress addon...
I0815 00:06:12.164520 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:12.166394 33429 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
W0815 00:06:12.170677 33429 out.go:239] ! Enabling 'storage-provisioner-rancher' returned an error: running callbacks: [Error making local-path the default storage class: Error while marking storage class local-path as default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
I0815 00:06:12.177055 33429 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0815 00:06:12.177077 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:12.670053 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:12.964117 33429 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0815 00:06:12.964195 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:12.990306 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:13.090320 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (5.33266703s)
W0815 00:06:13.090355 33429 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0815 00:06:13.090375 33429 retry.go:31] will retry after 175.622541ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0815 00:06:13.090390 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (5.320236356s)
I0815 00:06:13.090435 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (5.032315229s)
I0815 00:06:13.090462 33429 addons.go:475] Verifying addon registry=true in "addons-877132"
I0815 00:06:13.090501 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (4.120304899s)
I0815 00:06:13.091944 33429 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-877132 service yakd-dashboard -n yakd-dashboard
I0815 00:06:13.091950 33429 out.go:177] * Verifying registry addon...
I0815 00:06:13.093755 33429 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0815 00:06:13.157110 33429 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=registry
I0815 00:06:13.157140 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:13.171785 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:13.256795 33429 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0815 00:06:13.266211 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0815 00:06:13.275869 33429 addons.go:234] Setting addon gcp-auth=true in "addons-877132"
I0815 00:06:13.275940 33429 host.go:66] Checking if "addons-877132" exists ...
I0815 00:06:13.276428 33429 cli_runner.go:164] Run: docker container inspect addons-877132 --format={{.State.Status}}
I0815 00:06:13.297887 33429 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0815 00:06:13.297942 33429 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877132
I0815 00:06:13.314684 33429 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19443-25263/.minikube/machines/addons-877132/id_rsa Username:docker}
I0815 00:06:13.658383 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (3.488585678s)
I0815 00:06:13.658424 33429 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-877132"
I0815 00:06:13.658651 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:13.659795 33429 out.go:177] * Verifying csi-hostpath-driver addon...
I0815 00:06:13.662216 33429 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0815 00:06:13.666005 33429 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0815 00:06:13.666029 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:13.668835 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:14.155522 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:14.165718 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:14.166425 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:14.169249 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:14.596258 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:14.664609 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:14.670036 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:15.097283 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:15.166093 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:15.169339 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:15.596326 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:15.665152 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:15.669647 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:16.096862 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:16.165864 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:16.166340 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:16.196644 33429 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.930396544s)
I0815 00:06:16.196703 33429 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (2.898786484s)
I0815 00:06:16.198662 33429 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0815 00:06:16.198680 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:16.201338 33429 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0815 00:06:16.202541 33429 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0815 00:06:16.202556 33429 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0815 00:06:16.219803 33429 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0815 00:06:16.219831 33429 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0815 00:06:16.267002 33429 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0815 00:06:16.267071 33429 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0815 00:06:16.283505 33429 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.0/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0815 00:06:16.596842 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:16.665282 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:16.670150 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:16.805955 33429 addons.go:475] Verifying addon gcp-auth=true in "addons-877132"
I0815 00:06:16.807303 33429 out.go:177] * Verifying gcp-auth addon...
I0815 00:06:16.809043 33429 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0815 00:06:16.811299 33429 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0815 00:06:16.811318 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:17.096734 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:17.165013 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:17.168982 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:17.311617 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:17.597189 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:17.665310 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:17.669469 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:17.811621 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:18.097460 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:18.165631 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:18.169545 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:18.311265 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:18.597070 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:18.664448 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:18.697878 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:18.698131 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:18.811809 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:19.097224 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:19.165165 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:19.169296 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:19.317463 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:19.597102 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:19.665308 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:19.669472 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:19.812377 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:20.096809 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:20.165218 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:20.169284 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:20.312161 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:20.596603 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:20.665074 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:20.669058 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:20.812223 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:21.096596 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:21.165086 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:21.165136 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:21.168833 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:21.319500 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:21.596822 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:21.665152 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:21.669257 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:21.812305 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:22.096799 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:22.165049 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:22.168933 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:22.311831 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:22.596265 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:22.664599 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:22.669444 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:22.811399 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:23.096811 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:23.165124 33429 node_ready.go:53] node "addons-877132" has status "Ready":"False"
I0815 00:06:23.165243 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:23.169359 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:23.312662 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:23.597142 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:23.664739 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:23.669884 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:23.811958 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:24.096209 33429 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0815 00:06:24.096232 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:24.164915 33429 node_ready.go:49] node "addons-877132" has status "Ready":"True"
I0815 00:06:24.164938 33429 node_ready.go:38] duration metric: took 16.503624973s for node "addons-877132" to be "Ready" ...
I0815 00:06:24.164955 33429 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0815 00:06:24.166049 33429 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0815 00:06:24.166068 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:24.170142 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:24.173429 33429 pod_ready.go:78] waiting up to 6m0s for pod "coredns-6f6b679f8f-c42pc" in "kube-system" namespace to be "Ready" ...
I0815 00:06:24.355959 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:24.597389 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:24.666628 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:24.669410 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:24.812130 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:25.096547 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:25.167043 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:25.169602 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:25.355288 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:25.597426 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:25.666971 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:25.670355 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:25.678083 33429 pod_ready.go:92] pod "coredns-6f6b679f8f-c42pc" in "kube-system" namespace has status "Ready":"True"
I0815 00:06:25.678106 33429 pod_ready.go:81] duration metric: took 1.504654703s for pod "coredns-6f6b679f8f-c42pc" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.678133 33429 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.682037 33429 pod_ready.go:92] pod "etcd-addons-877132" in "kube-system" namespace has status "Ready":"True"
I0815 00:06:25.682055 33429 pod_ready.go:81] duration metric: took 3.913671ms for pod "etcd-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.682078 33429 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.686074 33429 pod_ready.go:92] pod "kube-apiserver-addons-877132" in "kube-system" namespace has status "Ready":"True"
I0815 00:06:25.686092 33429 pod_ready.go:81] duration metric: took 4.003183ms for pod "kube-apiserver-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.686104 33429 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.690123 33429 pod_ready.go:92] pod "kube-controller-manager-addons-877132" in "kube-system" namespace has status "Ready":"True"
I0815 00:06:25.690142 33429 pod_ready.go:81] duration metric: took 4.029781ms for pod "kube-controller-manager-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.690157 33429 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-v6kx7" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.764591 33429 pod_ready.go:92] pod "kube-proxy-v6kx7" in "kube-system" namespace has status "Ready":"True"
I0815 00:06:25.764670 33429 pod_ready.go:81] duration metric: took 74.503022ms for pod "kube-proxy-v6kx7" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.764686 33429 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:25.812299 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:26.097806 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:26.169194 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:26.169487 33429 pod_ready.go:92] pod "kube-scheduler-addons-877132" in "kube-system" namespace has status "Ready":"True"
I0815 00:06:26.169514 33429 pod_ready.go:81] duration metric: took 404.819415ms for pod "kube-scheduler-addons-877132" in "kube-system" namespace to be "Ready" ...
I0815 00:06:26.169539 33429 pod_ready.go:78] waiting up to 6m0s for pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace to be "Ready" ...
I0815 00:06:26.172362 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:26.312540 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:26.597942 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:26.666295 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:26.670387 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:26.812404 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:27.097376 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:27.167501 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:27.169841 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:27.312952 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:27.599500 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:27.666954 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:27.669769 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:27.812771 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:28.097661 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:28.167011 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:28.169733 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:28.174188 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:28.312769 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:28.597722 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:28.666848 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:28.669482 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:28.812800 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:29.098209 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:29.167180 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:29.169890 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:29.312700 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:29.597754 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:29.665572 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:29.669489 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:29.812665 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:30.157157 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0815 00:06:30.166642 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:30.177519 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:30.180207 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:30.356588 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:30.597957 33429 kapi.go:107] duration metric: took 17.504196925s to wait for kubernetes.io/minikube-addons=registry ...
I0815 00:06:30.666243 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:30.670815 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:30.813007 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:31.167613 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:31.169328 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:31.313181 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:31.666720 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:31.669434 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:31.811910 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:32.166506 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:32.169062 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:32.311776 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:32.666775 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:32.670042 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:32.674328 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:32.811997 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:33.169603 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:33.170197 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:33.356708 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:33.666304 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:33.670392 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:33.812677 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:34.167381 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:34.170171 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:34.312544 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:34.666532 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:34.669482 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:34.812211 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:35.167257 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:35.169456 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:35.173423 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:35.312015 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:35.666706 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:35.669857 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:35.812364 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:36.166598 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:36.169053 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:36.312373 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:36.667821 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:36.671196 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:36.857237 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:37.169468 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:37.170919 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:37.175323 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:37.355970 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:37.666841 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:37.670268 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:37.812428 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:38.167080 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:38.170187 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:38.312594 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:38.666054 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:38.669825 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:38.812021 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:39.166241 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:39.267161 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:39.311799 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:39.667701 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:39.670267 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:39.674734 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:39.812349 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:40.168015 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:40.169594 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:40.312432 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:40.665674 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:40.669752 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:40.812537 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:41.167876 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:41.169598 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:41.312173 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:41.666948 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:41.670803 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:41.812414 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:42.166289 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:42.169867 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:42.173537 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:42.311882 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:42.667753 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:42.670676 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:42.812295 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:43.169618 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:43.169854 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:43.313320 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:43.666307 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:43.670383 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:43.812476 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:44.167127 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:44.170013 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:44.174233 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:44.311846 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:44.667126 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:44.669867 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:44.855643 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:45.167763 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:45.170016 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:45.313417 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:45.666129 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:45.670059 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:45.813091 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:46.166678 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:46.169893 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:46.312450 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:46.665829 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:46.670061 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:46.674101 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:46.812249 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:47.169598 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:47.169608 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:47.312248 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:47.666873 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:47.669374 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:47.812134 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:48.167158 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:48.170215 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:48.312747 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:48.666203 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:48.670026 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:48.812154 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:49.166461 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:49.169165 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:49.174184 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:49.312823 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:49.666717 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:49.669712 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:49.812030 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:50.166358 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:50.170069 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:50.312131 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:50.666409 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:50.669159 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:50.811804 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:51.167643 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:51.170383 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:51.174565 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:51.357329 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:51.667694 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:51.673988 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:51.855570 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:52.167630 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:52.171705 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:52.357523 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:52.667416 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:52.671989 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:52.856473 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:53.167342 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:53.170225 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:53.357056 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:53.667785 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:53.670341 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:53.675346 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:53.812287 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:54.167962 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:54.169368 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:54.312346 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:54.665866 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:54.670543 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:54.812505 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:55.166866 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:55.169578 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:55.312197 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:55.667048 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:55.670233 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:55.812036 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:56.167650 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:56.169862 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:56.173888 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:56.312438 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:56.666120 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:56.670307 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:56.811811 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:57.168190 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:57.171201 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:57.313042 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:57.673029 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:57.675625 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:57.813071 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:58.167075 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:58.170091 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:58.175180 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:06:58.312767 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:58.666795 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:58.669391 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:58.812165 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:59.167267 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:59.170177 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:59.312417 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:06:59.666057 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:06:59.669690 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:06:59.811822 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:00.166831 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:00.170224 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:00.312503 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:00.667657 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:00.676413 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:00.767451 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:00.812517 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:01.166638 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:01.169325 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:01.312692 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:01.666198 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:01.669887 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:01.812554 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:02.168126 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:02.169326 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:02.313091 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:02.667880 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:02.669870 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:02.865938 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:03.167117 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:03.176085 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:03.267575 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:03.367374 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0815 00:07:03.666205 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:03.671232 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:03.812560 33429 kapi.go:107] duration metric: took 47.003516074s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0815 00:07:03.814146 33429 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-877132 cluster.
I0815 00:07:03.815458 33429 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0815 00:07:03.816787 33429 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0815 00:07:04.166733 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:04.170848 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:04.671507 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:04.684842 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:05.166792 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:05.169699 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:05.666612 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:05.669642 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:05.674348 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:06.166989 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:06.169586 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:06.667233 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:06.670146 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:07.166493 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:07.169125 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:07.667067 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:07.670993 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:07.674543 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:08.166585 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:08.169456 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:08.667276 33429 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0815 00:07:08.670520 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:09.166747 33429 kapi.go:107] duration metric: took 55.504525178s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0815 00:07:09.169549 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:09.670367 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:10.170088 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:10.173925 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:10.670285 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:11.169891 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:11.670347 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:12.169423 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:12.174126 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:12.670730 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:13.169706 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:13.670690 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:14.169616 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:14.670409 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:14.675991 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:15.169947 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:15.670905 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:16.170207 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:16.670228 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:17.169428 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:17.173681 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:17.670291 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:18.169163 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:18.670150 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:19.169975 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:19.174116 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:19.768032 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:20.171759 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:20.670635 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:21.170137 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:21.670728 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:21.673692 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:22.169711 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:22.670073 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:23.170251 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:23.670243 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:23.674950 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:24.169467 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:24.670307 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:25.169275 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:25.670140 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:26.170211 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:26.174259 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:26.670923 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:27.169802 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:27.670058 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:28.170245 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:28.180361 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:28.671837 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:29.174730 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:29.671621 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:30.176404 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:30.259231 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:30.671253 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:31.170859 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:31.670796 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:32.170361 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:32.670998 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:32.674300 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:33.170206 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:33.671980 33429 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0815 00:07:34.170420 33429 kapi.go:107] duration metric: took 1m22.004022687s to wait for app.kubernetes.io/name=ingress-nginx ...
I0815 00:07:34.172004 33429 out.go:177] * Enabled addons: storage-provisioner, ingress-dns, nvidia-device-plugin, cloud-spanner, helm-tiller, metrics-server, default-storageclass, inspektor-gadget, yakd, volumesnapshots, registry, gcp-auth, csi-hostpath-driver, ingress
I0815 00:07:34.173191 33429 addons.go:510] duration metric: took 1m28.545170819s for enable addons: enabled=[storage-provisioner ingress-dns nvidia-device-plugin cloud-spanner helm-tiller metrics-server default-storageclass inspektor-gadget yakd volumesnapshots registry gcp-auth csi-hostpath-driver ingress]
I0815 00:07:35.175895 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:37.674777 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:40.174631 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:42.174721 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:44.674328 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:46.675786 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:49.174408 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:51.674873 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:54.174351 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:56.174565 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:07:58.174795 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:08:00.175420 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:08:02.674741 33429 pod_ready.go:102] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"False"
I0815 00:08:04.674774 33429 pod_ready.go:92] pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace has status "Ready":"True"
I0815 00:08:04.674806 33429 pod_ready.go:81] duration metric: took 1m38.505250087s for pod "metrics-server-8988944d9-sgrxc" in "kube-system" namespace to be "Ready" ...
I0815 00:08:04.674822 33429 pod_ready.go:78] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-6d62n" in "kube-system" namespace to be "Ready" ...
I0815 00:08:04.678550 33429 pod_ready.go:92] pod "nvidia-device-plugin-daemonset-6d62n" in "kube-system" namespace has status "Ready":"True"
I0815 00:08:04.678569 33429 pod_ready.go:81] duration metric: took 3.739721ms for pod "nvidia-device-plugin-daemonset-6d62n" in "kube-system" namespace to be "Ready" ...
I0815 00:08:04.678586 33429 pod_ready.go:38] duration metric: took 1m40.513617774s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0815 00:08:04.678603 33429 api_server.go:52] waiting for apiserver process to appear ...
I0815 00:08:04.678630 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0815 00:08:04.678677 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0815 00:08:04.710676 33429 cri.go:89] found id: "ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249"
I0815 00:08:04.710700 33429 cri.go:89] found id: ""
I0815 00:08:04.710708 33429 logs.go:276] 1 containers: [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249]
I0815 00:08:04.710757 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.713725 33429 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0815 00:08:04.713779 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0815 00:08:04.744311 33429 cri.go:89] found id: "f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec"
I0815 00:08:04.744335 33429 cri.go:89] found id: ""
I0815 00:08:04.744345 33429 logs.go:276] 1 containers: [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec]
I0815 00:08:04.744387 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.747394 33429 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0815 00:08:04.747437 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0815 00:08:04.777949 33429 cri.go:89] found id: "4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3"
I0815 00:08:04.777966 33429 cri.go:89] found id: ""
I0815 00:08:04.777973 33429 logs.go:276] 1 containers: [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3]
I0815 00:08:04.778010 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.780902 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0815 00:08:04.780976 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0815 00:08:04.812184 33429 cri.go:89] found id: "bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0"
I0815 00:08:04.812204 33429 cri.go:89] found id: ""
I0815 00:08:04.812213 33429 logs.go:276] 1 containers: [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0]
I0815 00:08:04.812254 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.815194 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0815 00:08:04.815263 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0815 00:08:04.845303 33429 cri.go:89] found id: "e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1"
I0815 00:08:04.845321 33429 cri.go:89] found id: ""
I0815 00:08:04.845329 33429 logs.go:276] 1 containers: [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1]
I0815 00:08:04.845367 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.848510 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0815 00:08:04.848570 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0815 00:08:04.879573 33429 cri.go:89] found id: "4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280"
I0815 00:08:04.879594 33429 cri.go:89] found id: ""
I0815 00:08:04.879601 33429 logs.go:276] 1 containers: [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280]
I0815 00:08:04.879654 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.882866 33429 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0815 00:08:04.882926 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0815 00:08:04.913837 33429 cri.go:89] found id: "17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677"
I0815 00:08:04.913859 33429 cri.go:89] found id: ""
I0815 00:08:04.913866 33429 logs.go:276] 1 containers: [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677]
I0815 00:08:04.913905 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:04.917007 33429 logs.go:123] Gathering logs for kube-proxy [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1] ...
I0815 00:08:04.917030 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1"
I0815 00:08:04.947729 33429 logs.go:123] Gathering logs for kindnet [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677] ...
I0815 00:08:04.947755 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677"
I0815 00:08:04.983589 33429 logs.go:123] Gathering logs for dmesg ...
I0815 00:08:04.983615 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0815 00:08:04.995473 33429 logs.go:123] Gathering logs for describe nodes ...
I0815 00:08:04.995501 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0815 00:08:05.087662 33429 logs.go:123] Gathering logs for kube-apiserver [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249] ...
I0815 00:08:05.087690 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249"
I0815 00:08:05.129108 33429 logs.go:123] Gathering logs for coredns [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3] ...
I0815 00:08:05.129137 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3"
I0815 00:08:05.164587 33429 logs.go:123] Gathering logs for kube-scheduler [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0] ...
I0815 00:08:05.164624 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0"
I0815 00:08:05.203248 33429 logs.go:123] Gathering logs for kubelet ...
I0815 00:08:05.203273 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0815 00:08:05.270185 33429 logs.go:123] Gathering logs for etcd [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec] ...
I0815 00:08:05.270214 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec"
I0815 00:08:05.317054 33429 logs.go:123] Gathering logs for kube-controller-manager [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280] ...
I0815 00:08:05.317083 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280"
I0815 00:08:05.370222 33429 logs.go:123] Gathering logs for CRI-O ...
I0815 00:08:05.370252 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0815 00:08:05.446168 33429 logs.go:123] Gathering logs for container status ...
I0815 00:08:05.446204 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0815 00:08:07.987446 33429 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0815 00:08:08.000573 33429 api_server.go:72] duration metric: took 2m2.372588715s to wait for apiserver process to appear ...
I0815 00:08:08.000594 33429 api_server.go:88] waiting for apiserver healthz status ...
I0815 00:08:08.000627 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0815 00:08:08.000662 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0815 00:08:08.031934 33429 cri.go:89] found id: "ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249"
I0815 00:08:08.031958 33429 cri.go:89] found id: ""
I0815 00:08:08.031967 33429 logs.go:276] 1 containers: [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249]
I0815 00:08:08.032018 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.034976 33429 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0815 00:08:08.035037 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0815 00:08:08.065162 33429 cri.go:89] found id: "f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec"
I0815 00:08:08.065186 33429 cri.go:89] found id: ""
I0815 00:08:08.065194 33429 logs.go:276] 1 containers: [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec]
I0815 00:08:08.065236 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.068160 33429 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0815 00:08:08.068208 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0815 00:08:08.099502 33429 cri.go:89] found id: "4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3"
I0815 00:08:08.099523 33429 cri.go:89] found id: ""
I0815 00:08:08.099531 33429 logs.go:276] 1 containers: [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3]
I0815 00:08:08.099578 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.102636 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0815 00:08:08.102683 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0815 00:08:08.134129 33429 cri.go:89] found id: "bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0"
I0815 00:08:08.134149 33429 cri.go:89] found id: ""
I0815 00:08:08.134157 33429 logs.go:276] 1 containers: [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0]
I0815 00:08:08.134193 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.137077 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0815 00:08:08.137118 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0815 00:08:08.169612 33429 cri.go:89] found id: "e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1"
I0815 00:08:08.169633 33429 cri.go:89] found id: ""
I0815 00:08:08.169643 33429 logs.go:276] 1 containers: [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1]
I0815 00:08:08.169693 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.173000 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0815 00:08:08.173051 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0815 00:08:08.203461 33429 cri.go:89] found id: "4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280"
I0815 00:08:08.203485 33429 cri.go:89] found id: ""
I0815 00:08:08.203494 33429 logs.go:276] 1 containers: [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280]
I0815 00:08:08.203533 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.206389 33429 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0815 00:08:08.206430 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0815 00:08:08.236086 33429 cri.go:89] found id: "17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677"
I0815 00:08:08.236109 33429 cri.go:89] found id: ""
I0815 00:08:08.236119 33429 logs.go:276] 1 containers: [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677]
I0815 00:08:08.236166 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:08.239141 33429 logs.go:123] Gathering logs for dmesg ...
I0815 00:08:08.239159 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0815 00:08:08.249874 33429 logs.go:123] Gathering logs for describe nodes ...
I0815 00:08:08.249896 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0815 00:08:08.340261 33429 logs.go:123] Gathering logs for kube-controller-manager [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280] ...
I0815 00:08:08.340287 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280"
I0815 00:08:08.394232 33429 logs.go:123] Gathering logs for CRI-O ...
I0815 00:08:08.394260 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0815 00:08:08.466817 33429 logs.go:123] Gathering logs for container status ...
I0815 00:08:08.466849 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0815 00:08:08.506450 33429 logs.go:123] Gathering logs for kubelet ...
I0815 00:08:08.506477 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0815 00:08:08.573143 33429 logs.go:123] Gathering logs for kube-apiserver [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249] ...
I0815 00:08:08.573173 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249"
I0815 00:08:08.613210 33429 logs.go:123] Gathering logs for etcd [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec] ...
I0815 00:08:08.613235 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec"
I0815 00:08:08.659426 33429 logs.go:123] Gathering logs for coredns [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3] ...
I0815 00:08:08.659453 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3"
I0815 00:08:08.695176 33429 logs.go:123] Gathering logs for kube-scheduler [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0] ...
I0815 00:08:08.695200 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0"
I0815 00:08:08.732673 33429 logs.go:123] Gathering logs for kube-proxy [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1] ...
I0815 00:08:08.732699 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1"
I0815 00:08:08.762290 33429 logs.go:123] Gathering logs for kindnet [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677] ...
I0815 00:08:08.762314 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677"
I0815 00:08:11.299374 33429 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0815 00:08:11.302863 33429 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0815 00:08:11.303608 33429 api_server.go:141] control plane version: v1.31.0
I0815 00:08:11.303629 33429 api_server.go:131] duration metric: took 3.30302873s to wait for apiserver health ...
I0815 00:08:11.303638 33429 system_pods.go:43] waiting for kube-system pods to appear ...
I0815 00:08:11.303662 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-apiserver Namespaces:[]}
I0815 00:08:11.303715 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-apiserver
I0815 00:08:11.335368 33429 cri.go:89] found id: "ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249"
I0815 00:08:11.335387 33429 cri.go:89] found id: ""
I0815 00:08:11.335394 33429 logs.go:276] 1 containers: [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249]
I0815 00:08:11.335433 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.338517 33429 cri.go:54] listing CRI containers in root : {State:all Name:etcd Namespaces:[]}
I0815 00:08:11.338588 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=etcd
I0815 00:08:11.368653 33429 cri.go:89] found id: "f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec"
I0815 00:08:11.368675 33429 cri.go:89] found id: ""
I0815 00:08:11.368682 33429 logs.go:276] 1 containers: [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec]
I0815 00:08:11.368727 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.371711 33429 cri.go:54] listing CRI containers in root : {State:all Name:coredns Namespaces:[]}
I0815 00:08:11.371762 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=coredns
I0815 00:08:11.403775 33429 cri.go:89] found id: "4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3"
I0815 00:08:11.403798 33429 cri.go:89] found id: ""
I0815 00:08:11.403808 33429 logs.go:276] 1 containers: [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3]
I0815 00:08:11.403853 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.406855 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-scheduler Namespaces:[]}
I0815 00:08:11.406913 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-scheduler
I0815 00:08:11.437894 33429 cri.go:89] found id: "bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0"
I0815 00:08:11.437911 33429 cri.go:89] found id: ""
I0815 00:08:11.437918 33429 logs.go:276] 1 containers: [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0]
I0815 00:08:11.437963 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.440939 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-proxy Namespaces:[]}
I0815 00:08:11.440996 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-proxy
I0815 00:08:11.472247 33429 cri.go:89] found id: "e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1"
I0815 00:08:11.472267 33429 cri.go:89] found id: ""
I0815 00:08:11.472274 33429 logs.go:276] 1 containers: [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1]
I0815 00:08:11.472312 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.475285 33429 cri.go:54] listing CRI containers in root : {State:all Name:kube-controller-manager Namespaces:[]}
I0815 00:08:11.475339 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kube-controller-manager
I0815 00:08:11.505337 33429 cri.go:89] found id: "4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280"
I0815 00:08:11.505359 33429 cri.go:89] found id: ""
I0815 00:08:11.505367 33429 logs.go:276] 1 containers: [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280]
I0815 00:08:11.505419 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.508302 33429 cri.go:54] listing CRI containers in root : {State:all Name:kindnet Namespaces:[]}
I0815 00:08:11.508356 33429 ssh_runner.go:195] Run: sudo crictl ps -a --quiet --name=kindnet
I0815 00:08:11.539121 33429 cri.go:89] found id: "17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677"
I0815 00:08:11.539144 33429 cri.go:89] found id: ""
I0815 00:08:11.539153 33429 logs.go:276] 1 containers: [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677]
I0815 00:08:11.539199 33429 ssh_runner.go:195] Run: which crictl
I0815 00:08:11.542054 33429 logs.go:123] Gathering logs for kubelet ...
I0815 00:08:11.542077 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u kubelet -n 400"
I0815 00:08:11.611565 33429 logs.go:123] Gathering logs for dmesg ...
I0815 00:08:11.611596 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo dmesg -PH -L=never --level warn,err,crit,alert,emerg | tail -n 400"
I0815 00:08:11.623230 33429 logs.go:123] Gathering logs for etcd [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec] ...
I0815 00:08:11.623255 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec"
I0815 00:08:11.670940 33429 logs.go:123] Gathering logs for kindnet [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677] ...
I0815 00:08:11.670967 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677"
I0815 00:08:11.706879 33429 logs.go:123] Gathering logs for container status ...
I0815 00:08:11.706906 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo `which crictl || echo crictl` ps -a || sudo docker ps -a"
I0815 00:08:11.745902 33429 logs.go:123] Gathering logs for kube-controller-manager [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280] ...
I0815 00:08:11.745929 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280"
I0815 00:08:11.802685 33429 logs.go:123] Gathering logs for CRI-O ...
I0815 00:08:11.802714 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo journalctl -u crio -n 400"
I0815 00:08:11.873752 33429 logs.go:123] Gathering logs for describe nodes ...
I0815 00:08:11.873781 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.0/kubectl describe nodes --kubeconfig=/var/lib/minikube/kubeconfig"
I0815 00:08:11.962736 33429 logs.go:123] Gathering logs for kube-apiserver [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249] ...
I0815 00:08:11.962765 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249"
I0815 00:08:12.004013 33429 logs.go:123] Gathering logs for coredns [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3] ...
I0815 00:08:12.004041 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3"
I0815 00:08:12.039680 33429 logs.go:123] Gathering logs for kube-scheduler [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0] ...
I0815 00:08:12.039709 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0"
I0815 00:08:12.079354 33429 logs.go:123] Gathering logs for kube-proxy [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1] ...
I0815 00:08:12.079381 33429 ssh_runner.go:195] Run: /bin/bash -c "sudo /usr/bin/crictl logs --tail 400 e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1"
I0815 00:08:14.620661 33429 system_pods.go:59] 19 kube-system pods found
I0815 00:08:14.620688 33429 system_pods.go:61] "coredns-6f6b679f8f-c42pc" [c7d6d0e1-376e-4009-b23c-4ec563e9fb5c] Running
I0815 00:08:14.620693 33429 system_pods.go:61] "csi-hostpath-attacher-0" [fc9a04f6-9b77-46c0-8179-7faf0b4d0508] Running
I0815 00:08:14.620696 33429 system_pods.go:61] "csi-hostpath-resizer-0" [7832e4c7-4b14-4716-a24c-299d683020e7] Running
I0815 00:08:14.620700 33429 system_pods.go:61] "csi-hostpathplugin-9bq4q" [20f345c9-95b5-4fdd-9b09-0ef44d9e025c] Running
I0815 00:08:14.620703 33429 system_pods.go:61] "etcd-addons-877132" [c9fcbdb6-c56f-4565-955e-bd059a243317] Running
I0815 00:08:14.620706 33429 system_pods.go:61] "kindnet-chbk7" [d5bb12f8-f766-4a6c-96d9-4a736660a5d4] Running
I0815 00:08:14.620710 33429 system_pods.go:61] "kube-apiserver-addons-877132" [f11ef0cb-06f5-43c2-ab90-9e16415dfbdb] Running
I0815 00:08:14.620715 33429 system_pods.go:61] "kube-controller-manager-addons-877132" [feefe7f6-b920-4abc-868e-c757b7f0611e] Running
I0815 00:08:14.620719 33429 system_pods.go:61] "kube-ingress-dns-minikube" [a8fc2d7b-0cd2-425b-a632-15debd9dd0c7] Running
I0815 00:08:14.620724 33429 system_pods.go:61] "kube-proxy-v6kx7" [ba0854ec-7db4-4e33-9e58-c440a176fab5] Running
I0815 00:08:14.620728 33429 system_pods.go:61] "kube-scheduler-addons-877132" [711196de-fe86-4df3-9d53-f4e1ccd343e5] Running
I0815 00:08:14.620733 33429 system_pods.go:61] "metrics-server-8988944d9-sgrxc" [39bb006b-3cb8-4b3f-bd6c-a14e00873f12] Running
I0815 00:08:14.620741 33429 system_pods.go:61] "nvidia-device-plugin-daemonset-6d62n" [0b96b707-d892-4a7c-9728-5d4ddf5b5465] Running
I0815 00:08:14.620747 33429 system_pods.go:61] "registry-6fb4cdfc84-r4n2w" [6ba345fc-6428-44c4-a39f-a525f747a85d] Running
I0815 00:08:14.620755 33429 system_pods.go:61] "registry-proxy-9j2gn" [dafac940-abdc-432d-9a46-cf80da8907aa] Running
I0815 00:08:14.620759 33429 system_pods.go:61] "snapshot-controller-56fcc65765-fcg26" [94c41682-f8b9-44c9-be9d-f4967e9d88fb] Running
I0815 00:08:14.620762 33429 system_pods.go:61] "snapshot-controller-56fcc65765-gmh75" [8d111fc4-b50c-4b66-b7ed-f75310edc407] Running
I0815 00:08:14.620765 33429 system_pods.go:61] "storage-provisioner" [da0204ad-464f-432a-8431-4e0541f190da] Running
I0815 00:08:14.620771 33429 system_pods.go:61] "tiller-deploy-b48cc5f79-bthmf" [62d076df-bde8-40cf-ab28-b8fba5fea0d6] Running
I0815 00:08:14.620777 33429 system_pods.go:74] duration metric: took 3.317132352s to wait for pod list to return data ...
I0815 00:08:14.620786 33429 default_sa.go:34] waiting for default service account to be created ...
I0815 00:08:14.623061 33429 default_sa.go:45] found service account: "default"
I0815 00:08:14.623081 33429 default_sa.go:55] duration metric: took 2.290351ms for default service account to be created ...
I0815 00:08:14.623090 33429 system_pods.go:116] waiting for k8s-apps to be running ...
I0815 00:08:14.630696 33429 system_pods.go:86] 19 kube-system pods found
I0815 00:08:14.630721 33429 system_pods.go:89] "coredns-6f6b679f8f-c42pc" [c7d6d0e1-376e-4009-b23c-4ec563e9fb5c] Running
I0815 00:08:14.630729 33429 system_pods.go:89] "csi-hostpath-attacher-0" [fc9a04f6-9b77-46c0-8179-7faf0b4d0508] Running
I0815 00:08:14.630735 33429 system_pods.go:89] "csi-hostpath-resizer-0" [7832e4c7-4b14-4716-a24c-299d683020e7] Running
I0815 00:08:14.630741 33429 system_pods.go:89] "csi-hostpathplugin-9bq4q" [20f345c9-95b5-4fdd-9b09-0ef44d9e025c] Running
I0815 00:08:14.630746 33429 system_pods.go:89] "etcd-addons-877132" [c9fcbdb6-c56f-4565-955e-bd059a243317] Running
I0815 00:08:14.630752 33429 system_pods.go:89] "kindnet-chbk7" [d5bb12f8-f766-4a6c-96d9-4a736660a5d4] Running
I0815 00:08:14.630758 33429 system_pods.go:89] "kube-apiserver-addons-877132" [f11ef0cb-06f5-43c2-ab90-9e16415dfbdb] Running
I0815 00:08:14.630766 33429 system_pods.go:89] "kube-controller-manager-addons-877132" [feefe7f6-b920-4abc-868e-c757b7f0611e] Running
I0815 00:08:14.630773 33429 system_pods.go:89] "kube-ingress-dns-minikube" [a8fc2d7b-0cd2-425b-a632-15debd9dd0c7] Running
I0815 00:08:14.630783 33429 system_pods.go:89] "kube-proxy-v6kx7" [ba0854ec-7db4-4e33-9e58-c440a176fab5] Running
I0815 00:08:14.630790 33429 system_pods.go:89] "kube-scheduler-addons-877132" [711196de-fe86-4df3-9d53-f4e1ccd343e5] Running
I0815 00:08:14.630798 33429 system_pods.go:89] "metrics-server-8988944d9-sgrxc" [39bb006b-3cb8-4b3f-bd6c-a14e00873f12] Running
I0815 00:08:14.630809 33429 system_pods.go:89] "nvidia-device-plugin-daemonset-6d62n" [0b96b707-d892-4a7c-9728-5d4ddf5b5465] Running
I0815 00:08:14.630817 33429 system_pods.go:89] "registry-6fb4cdfc84-r4n2w" [6ba345fc-6428-44c4-a39f-a525f747a85d] Running
I0815 00:08:14.630827 33429 system_pods.go:89] "registry-proxy-9j2gn" [dafac940-abdc-432d-9a46-cf80da8907aa] Running
I0815 00:08:14.630834 33429 system_pods.go:89] "snapshot-controller-56fcc65765-fcg26" [94c41682-f8b9-44c9-be9d-f4967e9d88fb] Running
I0815 00:08:14.630844 33429 system_pods.go:89] "snapshot-controller-56fcc65765-gmh75" [8d111fc4-b50c-4b66-b7ed-f75310edc407] Running
I0815 00:08:14.630853 33429 system_pods.go:89] "storage-provisioner" [da0204ad-464f-432a-8431-4e0541f190da] Running
I0815 00:08:14.630859 33429 system_pods.go:89] "tiller-deploy-b48cc5f79-bthmf" [62d076df-bde8-40cf-ab28-b8fba5fea0d6] Running
I0815 00:08:14.630869 33429 system_pods.go:126] duration metric: took 7.771619ms to wait for k8s-apps to be running ...
I0815 00:08:14.630880 33429 system_svc.go:44] waiting for kubelet service to be running ....
I0815 00:08:14.630927 33429 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0815 00:08:14.641293 33429 system_svc.go:56] duration metric: took 10.409007ms WaitForService to wait for kubelet
I0815 00:08:14.641320 33429 kubeadm.go:582] duration metric: took 2m9.013343958s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0815 00:08:14.641343 33429 node_conditions.go:102] verifying NodePressure condition ...
I0815 00:08:14.644057 33429 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0815 00:08:14.644080 33429 node_conditions.go:123] node cpu capacity is 8
I0815 00:08:14.644090 33429 node_conditions.go:105] duration metric: took 2.743633ms to run NodePressure ...
I0815 00:08:14.644101 33429 start.go:241] waiting for startup goroutines ...
I0815 00:08:14.644107 33429 start.go:246] waiting for cluster config update ...
I0815 00:08:14.644121 33429 start.go:255] writing updated cluster config ...
I0815 00:08:14.644346 33429 ssh_runner.go:195] Run: rm -f paused
I0815 00:08:14.690031 33429 start.go:600] kubectl: 1.31.0, cluster: 1.31.0 (minor skew: 0)
I0815 00:08:14.691992 33429 out.go:177] * Done! kubectl is now configured to use "addons-877132" cluster and "default" namespace by default
==> CRI-O <==
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.667290168Z" level=info msg="Image status: &ImageStatusResponse{Image:&Image{Id:9056ab77afb8e18e04303f11000a9d31b3f16b74c59475b899ae1b342d328d30,RepoTags:[docker.io/kicbase/echo-server:1.0],RepoDigests:[docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 docker.io/kicbase/echo-server@sha256:a82eba7887a40ecae558433f34225b2611dc77f982ce05b1ddb9b282b780fc86],Size_:4944818,Uid:nil,Username:,Spec:nil,},Info:map[string]string{},}" id=145eb94e-2b9e-4e45-b88a-3d1d6ef57e9b name=/runtime.v1.ImageService/ImageStatus
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.667948532Z" level=info msg="Creating container: default/hello-world-app-55bf9c44b4-jw59v/hello-world-app" id=811e0cd4-e075-476c-88b4-28ffcd3459fe name=/runtime.v1.RuntimeService/CreateContainer
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.668041226Z" level=warning msg="Allowed annotations are specified for workload []"
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.683178281Z" level=warning msg="Failed to open /etc/passwd: open /var/lib/containers/storage/overlay/27316ecac6d7b0fabf24fb992047dca01c1dc1e53a897e5551c292b138e4352c/merged/etc/passwd: no such file or directory"
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.683209197Z" level=warning msg="Failed to open /etc/group: open /var/lib/containers/storage/overlay/27316ecac6d7b0fabf24fb992047dca01c1dc1e53a897e5551c292b138e4352c/merged/etc/group: no such file or directory"
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.713091649Z" level=info msg="Created container a77039a7ec09160ed190c729e30ef59309e91cc4d276738f4a10e97171c54eba: default/hello-world-app-55bf9c44b4-jw59v/hello-world-app" id=811e0cd4-e075-476c-88b4-28ffcd3459fe name=/runtime.v1.RuntimeService/CreateContainer
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.713596497Z" level=info msg="Starting container: a77039a7ec09160ed190c729e30ef59309e91cc4d276738f4a10e97171c54eba" id=69c3df8f-751f-492d-bb35-9c1bff92eabf name=/runtime.v1.RuntimeService/StartContainer
Aug 15 00:11:21 addons-877132 crio[1030]: time="2024-08-15 00:11:21.718525559Z" level=info msg="Started container" PID=11216 containerID=a77039a7ec09160ed190c729e30ef59309e91cc4d276738f4a10e97171c54eba description=default/hello-world-app-55bf9c44b4-jw59v/hello-world-app id=69c3df8f-751f-492d-bb35-9c1bff92eabf name=/runtime.v1.RuntimeService/StartContainer sandboxID=e2e48d3540fb534143f7578821450552a2724443486cfc153768b34a2d2f693d
Aug 15 00:11:22 addons-877132 crio[1030]: time="2024-08-15 00:11:22.369279616Z" level=info msg="Removing container: 4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9" id=9330cad4-53a9-45f6-a6ea-fc1e34e1042a name=/runtime.v1.RuntimeService/RemoveContainer
Aug 15 00:11:22 addons-877132 crio[1030]: time="2024-08-15 00:11:22.382780146Z" level=info msg="Removed container 4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9: kube-system/kube-ingress-dns-minikube/minikube-ingress-dns" id=9330cad4-53a9-45f6-a6ea-fc1e34e1042a name=/runtime.v1.RuntimeService/RemoveContainer
Aug 15 00:11:23 addons-877132 crio[1030]: time="2024-08-15 00:11:23.892764775Z" level=info msg="Stopping container: 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d (timeout: 2s)" id=796a1376-cb99-419b-8ae5-8578c17ba820 name=/runtime.v1.RuntimeService/StopContainer
Aug 15 00:11:25 addons-877132 crio[1030]: time="2024-08-15 00:11:25.898483374Z" level=warning msg="Stopping container 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d with stop signal timed out: timeout reached after 2 seconds waiting for container process to exit" id=796a1376-cb99-419b-8ae5-8578c17ba820 name=/runtime.v1.RuntimeService/StopContainer
Aug 15 00:11:25 addons-877132 conmon[6433]: conmon 8b6c013e33250c6bcb7d <ninfo>: container 6445 exited with status 137
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.028023902Z" level=info msg="Stopped container 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d: ingress-nginx/ingress-nginx-controller-7559cbf597-qfwsb/controller" id=796a1376-cb99-419b-8ae5-8578c17ba820 name=/runtime.v1.RuntimeService/StopContainer
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.028481605Z" level=info msg="Stopping pod sandbox: e904b5086ff6b9ad611ea53e3260a51d8d9922116446bf06cf84b59b0dc131c4" id=cb383282-5d43-4287-8d1e-b6a4525c6ffc name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.031294965Z" level=info msg="Restoring iptables rules: *nat\n:KUBE-HP-SIQZTLT7E2ELBK4O - [0:0]\n:KUBE-HOSTPORTS - [0:0]\n:KUBE-HP-DPMPHDS3ZAPSCFT6 - [0:0]\n-X KUBE-HP-SIQZTLT7E2ELBK4O\n-X KUBE-HP-DPMPHDS3ZAPSCFT6\nCOMMIT\n"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.032535563Z" level=info msg="Closing host port tcp:80"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.032569846Z" level=info msg="Closing host port tcp:443"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.033881278Z" level=info msg="Host port tcp:80 does not have an open socket"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.033901687Z" level=info msg="Host port tcp:443 does not have an open socket"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.034094225Z" level=info msg="Got pod network &{Name:ingress-nginx-controller-7559cbf597-qfwsb Namespace:ingress-nginx ID:e904b5086ff6b9ad611ea53e3260a51d8d9922116446bf06cf84b59b0dc131c4 UID:382b8687-211c-483f-ae46-64db5d2c2738 NetNS:/var/run/netns/ce926a11-4517-41a1-9901-186bc4c4d261 Networks:[{Name:kindnet Ifname:eth0}] RuntimeConfig:map[kindnet:{IP: MAC: PortMappings:[] Bandwidth:<nil> IpRanges:[]}] Aliases:map[]}"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.034203210Z" level=info msg="Deleting pod ingress-nginx_ingress-nginx-controller-7559cbf597-qfwsb from CNI network \"kindnet\" (type=ptp)"
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.062960615Z" level=info msg="Stopped pod sandbox: e904b5086ff6b9ad611ea53e3260a51d8d9922116446bf06cf84b59b0dc131c4" id=cb383282-5d43-4287-8d1e-b6a4525c6ffc name=/runtime.v1.RuntimeService/StopPodSandbox
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.380101929Z" level=info msg="Removing container: 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d" id=1c2b606a-14e4-42f5-a1b2-78ab1b63db5f name=/runtime.v1.RuntimeService/RemoveContainer
Aug 15 00:11:26 addons-877132 crio[1030]: time="2024-08-15 00:11:26.392251595Z" level=info msg="Removed container 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d: ingress-nginx/ingress-nginx-controller-7559cbf597-qfwsb/controller" id=1c2b606a-14e4-42f5-a1b2-78ab1b63db5f name=/runtime.v1.RuntimeService/RemoveContainer
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
a77039a7ec091 docker.io/kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 9 seconds ago Running hello-world-app 0 e2e48d3540fb5 hello-world-app-55bf9c44b4-jw59v
4700be58d0014 docker.io/library/nginx@sha256:208b70eefac13ee9be00e486f79c695b15cef861c680527171a27d253d834be9 2 minutes ago Running nginx 0 59d991d01214e nginx
13564dbfd5a46 gcr.io/k8s-minikube/busybox@sha256:2d03e6ceeb99250061dd110530b0ece7998cd84121f952adef120ea7c5a6f00e 3 minutes ago Running busybox 0 75dbd279c225c busybox
639140631be81 684c5ea3b61b299cd4e713c10bfd8989341da91f6175e2e6e502869c0781fb66 4 minutes ago Exited patch 2 2787c4d975b29 ingress-nginx-admission-patch-pds8t
ce1da5c1eb8b3 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:35379defc3e7025b1c00d37092f560ce87d06ea5ab35d04ff8a0cf22d316bcf2 4 minutes ago Exited create 0 d13d9edd73ff2 ingress-nginx-admission-create-6bdfx
70a331a391562 registry.k8s.io/metrics-server/metrics-server@sha256:31f034feb3f16062e93be7c40efc596553c89de172e2e412e588f02382388872 4 minutes ago Running metrics-server 0 f0f38be4fe7eb metrics-server-8988944d9-sgrxc
dd563d287505a docker.io/rancher/local-path-provisioner@sha256:73f712e7af12b06720c35ce75217f904f00e4bd96de79f8db1cf160112e667ef 4 minutes ago Running local-path-provisioner 0 e7ea6d4b55ddc local-path-provisioner-86d989889c-zjfx8
4ba66a3367daf cbb01a7bd410dc08ba382018ab909a674fb0e48687f0c00797ed5bc34fcc6bb4 5 minutes ago Running coredns 0 5a0b205e08bed coredns-6f6b679f8f-c42pc
03e7fb303164d 6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562 5 minutes ago Running storage-provisioner 0 5106a78d90785 storage-provisioner
17f6bd6dd22c5 docker.io/kindest/kindnetd@sha256:7dd6b2417263c1bdd6840b33fb61c2d0038c044b91195135969b92effa15d56b 5 minutes ago Running kindnet-cni 0 dfc330b405c09 kindnet-chbk7
e5fd37ee5ee48 ad83b2ca7b09e6162f96f933eecded731cbebf049c78f941fd0ce560a86b6494 5 minutes ago Running kube-proxy 0 0a7cee1c53467 kube-proxy-v6kx7
bd77b5ecfadb9 1766f54c897f0e57040741e6741462f2e3a7d754705f446c9f729c7e1230fb94 5 minutes ago Running kube-scheduler 0 af893679823f2 kube-scheduler-addons-877132
f16f228580088 2e96e5913fc06e3d26915af3d0f2ca5048cc4b6327e661e80da792cbf8d8d9d4 5 minutes ago Running etcd 0 d052b7010e20a etcd-addons-877132
ea70e9f2778e6 604f5db92eaa823d11c141d8825f1460206f6bf29babca2a909a698dc22055d3 5 minutes ago Running kube-apiserver 0 9f6fa62a9f394 kube-apiserver-addons-877132
4043a5cc95e0b 045733566833c40b15806c9b87d27f08e455e069833752e0e6ad7a76d37cb2b1 5 minutes ago Running kube-controller-manager 0 2875939046c1e kube-controller-manager-addons-877132
==> coredns [4ba66a3367daf4b53a35259a09c9bd004e02804ed6293ed0baf74ac2ef4f06d3] <==
[INFO] 10.244.0.2:47292 - 10763 "AAAA IN registry.kube-system.svc.cluster.local.cluster.local. udp 70 false 512" NXDOMAIN qr,aa,rd 163 0.000113724s
[INFO] 10.244.0.2:43420 - 43249 "AAAA IN registry.kube-system.svc.cluster.local.us-east4-a.c.k8s-minikube.internal. udp 91 false 512" NXDOMAIN qr,rd,ra 91 0.004319366s
[INFO] 10.244.0.2:43420 - 28402 "A IN registry.kube-system.svc.cluster.local.us-east4-a.c.k8s-minikube.internal. udp 91 false 512" NXDOMAIN qr,rd,ra 91 0.005696932s
[INFO] 10.244.0.2:44876 - 48744 "A IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.005006536s
[INFO] 10.244.0.2:44876 - 2155 "AAAA IN registry.kube-system.svc.cluster.local.c.k8s-minikube.internal. udp 80 false 512" NXDOMAIN qr,rd,ra 80 0.018558843s
[INFO] 10.244.0.2:58123 - 62458 "A IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004398771s
[INFO] 10.244.0.2:58123 - 33254 "AAAA IN registry.kube-system.svc.cluster.local.google.internal. udp 72 false 512" NXDOMAIN qr,rd,ra 72 0.004753437s
[INFO] 10.244.0.2:33809 - 16532 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000082026s
[INFO] 10.244.0.2:33809 - 31121 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000117847s
[INFO] 10.244.0.20:57076 - 48443 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000153305s
[INFO] 10.244.0.20:60786 - 431 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000243713s
[INFO] 10.244.0.20:48667 - 18154 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000120297s
[INFO] 10.244.0.20:60036 - 57584 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000157371s
[INFO] 10.244.0.20:35143 - 29134 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000109833s
[INFO] 10.244.0.20:54584 - 62281 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000111359s
[INFO] 10.244.0.20:33107 - 11591 "AAAA IN storage.googleapis.com.us-east4-a.c.k8s-minikube.internal. udp 86 false 1232" NXDOMAIN qr,rd,ra 75 0.007471795s
[INFO] 10.244.0.20:41578 - 30236 "A IN storage.googleapis.com.us-east4-a.c.k8s-minikube.internal. udp 86 false 1232" NXDOMAIN qr,rd,ra 75 0.007561275s
[INFO] 10.244.0.20:44643 - 23246 "A IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.004784347s
[INFO] 10.244.0.20:57858 - 50433 "AAAA IN storage.googleapis.com.c.k8s-minikube.internal. udp 75 false 1232" NXDOMAIN qr,rd,ra 64 0.006739376s
[INFO] 10.244.0.20:33262 - 49571 "A IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.00388293s
[INFO] 10.244.0.20:54723 - 15767 "AAAA IN storage.googleapis.com.google.internal. udp 67 false 1232" NXDOMAIN qr,rd,ra 56 0.004555087s
[INFO] 10.244.0.20:33820 - 52576 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 458 0.001059757s
[INFO] 10.244.0.20:40606 - 49433 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.001834019s
[INFO] 10.244.0.26:35309 - 2 "AAAA IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 149 0.000170417s
[INFO] 10.244.0.26:52476 - 3 "A IN registry.kube-system.svc.cluster.local. udp 56 false 512" NOERROR qr,aa,rd 110 0.000113322s
==> describe nodes <==
Name: addons-877132
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-877132
kubernetes.io/os=linux
minikube.k8s.io/commit=a560a51f794134545edbbeb49e1ab4a0b1355168
minikube.k8s.io/name=addons-877132
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_08_15T00_06_00_0700
minikube.k8s.io/version=v1.33.1
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-877132
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 15 Aug 2024 00:05:58 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-877132
AcquireTime: <unset>
RenewTime: Thu, 15 Aug 2024 00:11:26 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 15 Aug 2024 00:09:34 +0000 Thu, 15 Aug 2024 00:05:56 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 15 Aug 2024 00:09:34 +0000 Thu, 15 Aug 2024 00:05:56 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 15 Aug 2024 00:09:34 +0000 Thu, 15 Aug 2024 00:05:56 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 15 Aug 2024 00:09:34 +0000 Thu, 15 Aug 2024 00:06:23 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-877132
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859312Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859312Ki
pods: 110
System Info:
Machine ID: 7ac911fcfea74347829f75c9c0b9cec6
System UUID: c27f2cf4-9042-4197-8c06-a1fdd73beeb7
Boot ID: adfcefd8-b451-4316-855f-752470c63d29
Kernel Version: 5.15.0-1066-gcp
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: cri-o://1.24.6
Kubelet Version: v1.31.0
Kube-Proxy Version:
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (13 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 3m17s
default hello-world-app-55bf9c44b4-jw59v 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 11s
default nginx 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m30s
kube-system coredns-6f6b679f8f-c42pc 100m (1%!)(MISSING) 0 (0%!)(MISSING) 70Mi (0%!)(MISSING) 170Mi (0%!)(MISSING) 5m25s
kube-system etcd-addons-877132 100m (1%!)(MISSING) 0 (0%!)(MISSING) 100Mi (0%!)(MISSING) 0 (0%!)(MISSING) 5m31s
kube-system kindnet-chbk7 100m (1%!)(MISSING) 100m (1%!)(MISSING) 50Mi (0%!)(MISSING) 50Mi (0%!)(MISSING) 5m26s
kube-system kube-apiserver-addons-877132 250m (3%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m31s
kube-system kube-controller-manager-addons-877132 200m (2%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m31s
kube-system kube-proxy-v6kx7 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m26s
kube-system kube-scheduler-addons-877132 100m (1%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m31s
kube-system metrics-server-8988944d9-sgrxc 100m (1%!)(MISSING) 0 (0%!)(MISSING) 200Mi (0%!)(MISSING) 0 (0%!)(MISSING) 5m21s
kube-system storage-provisioner 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m21s
local-path-storage local-path-provisioner-86d989889c-zjfx8 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 5m21s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 950m (11%!)(MISSING) 100m (1%!)(MISSING)
memory 420Mi (1%!)(MISSING) 220Mi (0%!)(MISSING)
ephemeral-storage 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-1Gi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-2Mi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 5m20s kube-proxy
Normal NodeHasSufficientMemory 5m36s (x8 over 5m36s) kubelet Node addons-877132 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 5m36s (x8 over 5m36s) kubelet Node addons-877132 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 5m36s (x7 over 5m36s) kubelet Node addons-877132 status is now: NodeHasSufficientPID
Normal Starting 5m31s kubelet Starting kubelet.
Warning CgroupV1 5m31s kubelet Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal NodeHasSufficientMemory 5m31s kubelet Node addons-877132 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 5m31s kubelet Node addons-877132 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 5m31s kubelet Node addons-877132 status is now: NodeHasSufficientPID
Normal RegisteredNode 5m27s node-controller Node addons-877132 event: Registered Node addons-877132 in Controller
Normal NodeReady 5m8s kubelet Node addons-877132 status is now: NodeReady
==> dmesg <==
[ +0.000630] platform eisa.0: Cannot allocate resource for EISA slot 5
[ +0.000616] platform eisa.0: Cannot allocate resource for EISA slot 6
[ +0.000605] platform eisa.0: Cannot allocate resource for EISA slot 7
[ +0.000609] platform eisa.0: Cannot allocate resource for EISA slot 8
[ +0.594631] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.044972] systemd[1]: /lib/systemd/system/cloud-init-local.service:15: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.005902] systemd[1]: /lib/systemd/system/cloud-init.service:19: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.013048] systemd[1]: /lib/systemd/system/cloud-final.service:9: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.002588] systemd[1]: /lib/systemd/system/cloud-config.service:8: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +0.017548] systemd[1]: /lib/systemd/system/cloud-init.target:15: Unknown key name 'ConditionEnvironment' in section 'Unit', ignoring.
[ +6.299942] kauditd_printk_skb: 46 callbacks suppressed
[Aug15 00:09] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000014] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
[ +1.000074] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000005] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
[ +2.015815] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000005] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
[ +4.255606] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000022] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
[ +8.191208] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000020] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
[ +16.126475] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000007] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
[Aug15 00:10] IPv4: martian source 10.244.0.21 from 127.0.0.1, on dev eth0
[ +0.000030] ll header: 00000000: 96 a8 0a 24 52 dc de 81 16 9f 0c cd 08 00
==> etcd [f16f228580088705d9d978dd7020a6f22abb4f154e076a1986297e2d03e0cdec] <==
{"level":"warn","ts":"2024-08-15T00:06:09.661825Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"107.013375ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/serviceaccounts/kube-system/tiller\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-08-15T00:06:09.661889Z","caller":"traceutil/trace.go:171","msg":"trace[2058467710] range","detail":"{range_begin:/registry/serviceaccounts/kube-system/tiller; range_end:; response_count:0; response_revision:449; }","duration":"107.078526ms","start":"2024-08-15T00:06:09.554802Z","end":"2024-08-15T00:06:09.661881Z","steps":["trace[2058467710] 'agreement among raft nodes before linearized reading' (duration: 106.974073ms)"],"step_count":1}
{"level":"info","ts":"2024-08-15T00:06:09.960832Z","caller":"traceutil/trace.go:171","msg":"trace[1035712257] linearizableReadLoop","detail":"{readStateIndex:469; appliedIndex:468; }","duration":"186.731601ms","start":"2024-08-15T00:06:09.774078Z","end":"2024-08-15T00:06:09.960809Z","steps":["trace[1035712257] 'read index received' (duration: 183.924781ms)","trace[1035712257] 'applied index is now lower than readState.Index' (duration: 2.806055ms)"],"step_count":2}
{"level":"warn","ts":"2024-08-15T00:06:09.961756Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"200.995899ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-08-15T00:06:09.962261Z","caller":"traceutil/trace.go:171","msg":"trace[356290812] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:458; }","duration":"201.072963ms","start":"2024-08-15T00:06:09.760739Z","end":"2024-08-15T00:06:09.961812Z","steps":["trace[356290812] 'agreement among raft nodes before linearized reading' (duration: 200.95514ms)"],"step_count":1}
{"level":"info","ts":"2024-08-15T00:06:09.962450Z","caller":"traceutil/trace.go:171","msg":"trace[1489601163] transaction","detail":"{read_only:false; response_revision:458; number_of_response:1; }","duration":"197.664471ms","start":"2024-08-15T00:06:09.764761Z","end":"2024-08-15T00:06:09.962426Z","steps":["trace[1489601163] 'process raft request' (duration: 192.720086ms)"],"step_count":1}
{"level":"info","ts":"2024-08-15T00:06:10.158962Z","caller":"traceutil/trace.go:171","msg":"trace[1685509554] transaction","detail":"{read_only:false; response_revision:464; number_of_response:1; }","duration":"101.328154ms","start":"2024-08-15T00:06:10.057619Z","end":"2024-08-15T00:06:10.158947Z","steps":["trace[1685509554] 'process raft request' (duration: 98.627594ms)"],"step_count":1}
{"level":"info","ts":"2024-08-15T00:06:10.760371Z","caller":"traceutil/trace.go:171","msg":"trace[109234857] transaction","detail":"{read_only:false; response_revision:513; number_of_response:1; }","duration":"184.795977ms","start":"2024-08-15T00:06:10.575558Z","end":"2024-08-15T00:06:10.760354Z","steps":["trace[109234857] 'process raft request' (duration: 178.834908ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-15T00:06:10.760803Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"103.370207ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/replicasets/kube-system/registry-6fb4cdfc84\" ","response":"range_response_count:1 size:2551"}
{"level":"info","ts":"2024-08-15T00:06:10.760848Z","caller":"traceutil/trace.go:171","msg":"trace[1951375711] range","detail":"{range_begin:/registry/replicasets/kube-system/registry-6fb4cdfc84; range_end:; response_count:1; response_revision:515; }","duration":"103.424885ms","start":"2024-08-15T00:06:10.657413Z","end":"2024-08-15T00:06:10.760838Z","steps":["trace[1951375711] 'agreement among raft nodes before linearized reading' (duration: 103.276902ms)"],"step_count":1}
{"level":"info","ts":"2024-08-15T00:06:10.760670Z","caller":"traceutil/trace.go:171","msg":"trace[203738500] linearizableReadLoop","detail":"{readStateIndex:525; appliedIndex:524; }","duration":"103.241482ms","start":"2024-08-15T00:06:10.657417Z","end":"2024-08-15T00:06:10.760659Z","steps":["trace[203738500] 'read index received' (duration: 96.983851ms)","trace[203738500] 'applied index is now lower than readState.Index' (duration: 6.256736ms)"],"step_count":2}
{"level":"info","ts":"2024-08-15T00:06:10.761005Z","caller":"traceutil/trace.go:171","msg":"trace[874513999] transaction","detail":"{read_only:false; response_revision:514; number_of_response:1; }","duration":"103.439543ms","start":"2024-08-15T00:06:10.657558Z","end":"2024-08-15T00:06:10.760998Z","steps":["trace[874513999] 'process raft request' (duration: 102.874463ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-15T00:06:10.764836Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"102.833354ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/yakd-dashboard\" ","response":"range_response_count:1 size:883"}
{"level":"info","ts":"2024-08-15T00:06:10.764930Z","caller":"traceutil/trace.go:171","msg":"trace[798552039] range","detail":"{range_begin:/registry/namespaces/yakd-dashboard; range_end:; response_count:1; response_revision:516; }","duration":"102.934075ms","start":"2024-08-15T00:06:10.661985Z","end":"2024-08-15T00:06:10.764919Z","steps":["trace[798552039] 'agreement among raft nodes before linearized reading' (duration: 102.728931ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-15T00:06:10.765323Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"100.385976ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/addons-877132\" ","response":"range_response_count:1 size:5648"}
{"level":"info","ts":"2024-08-15T00:06:10.765416Z","caller":"traceutil/trace.go:171","msg":"trace[306473127] range","detail":"{range_begin:/registry/minions/addons-877132; range_end:; response_count:1; response_revision:516; }","duration":"100.480741ms","start":"2024-08-15T00:06:10.664926Z","end":"2024-08-15T00:06:10.765407Z","steps":["trace[306473127] 'agreement among raft nodes before linearized reading' (duration: 100.370266ms)"],"step_count":1}
{"level":"info","ts":"2024-08-15T00:07:08.287875Z","caller":"traceutil/trace.go:171","msg":"trace[1444096404] linearizableReadLoop","detail":"{readStateIndex:1251; appliedIndex:1250; }","duration":"114.749683ms","start":"2024-08-15T00:07:08.173107Z","end":"2024-08-15T00:07:08.287857Z","steps":["trace[1444096404] 'read index received' (duration: 114.546126ms)","trace[1444096404] 'applied index is now lower than readState.Index' (duration: 202.358µs)"],"step_count":2}
{"level":"info","ts":"2024-08-15T00:07:08.287946Z","caller":"traceutil/trace.go:171","msg":"trace[1350768147] transaction","detail":"{read_only:false; response_revision:1219; number_of_response:1; }","duration":"116.664519ms","start":"2024-08-15T00:07:08.171264Z","end":"2024-08-15T00:07:08.287929Z","steps":["trace[1350768147] 'process raft request' (duration: 116.440866ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-15T00:07:08.288062Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"114.93862ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/addons-877132\" ","response":"range_response_count:1 size:9170"}
{"level":"info","ts":"2024-08-15T00:07:08.288095Z","caller":"traceutil/trace.go:171","msg":"trace[205809032] range","detail":"{range_begin:/registry/minions/addons-877132; range_end:; response_count:1; response_revision:1219; }","duration":"114.983891ms","start":"2024-08-15T00:07:08.173100Z","end":"2024-08-15T00:07:08.288084Z","steps":["trace[205809032] 'agreement among raft nodes before linearized reading' (duration: 114.848983ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-15T00:07:19.613367Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"123.767064ms","expected-duration":"100ms","prefix":"","request":"header:<ID:8128031214691300417 username:\"kube-apiserver-etcd-client\" auth_revision:1 > lease_grant:<ttl:15-second id:70cc91535af63440>","response":"size:41"}
{"level":"info","ts":"2024-08-15T00:07:19.763949Z","caller":"traceutil/trace.go:171","msg":"trace[1049542164] transaction","detail":"{read_only:false; response_revision:1244; number_of_response:1; }","duration":"192.04081ms","start":"2024-08-15T00:07:19.571892Z","end":"2024-08-15T00:07:19.763933Z","steps":["trace[1049542164] 'process raft request' (duration: 169.989844ms)","trace[1049542164] 'compare' (duration: 21.954224ms)"],"step_count":2}
{"level":"info","ts":"2024-08-15T00:07:19.765023Z","caller":"traceutil/trace.go:171","msg":"trace[49433671] transaction","detail":"{read_only:false; response_revision:1245; number_of_response:1; }","duration":"150.971344ms","start":"2024-08-15T00:07:19.614039Z","end":"2024-08-15T00:07:19.765010Z","steps":["trace[49433671] 'process raft request' (duration: 150.874636ms)"],"step_count":1}
{"level":"warn","ts":"2024-08-15T00:09:20.066162Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"107.995095ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-08-15T00:09:20.066232Z","caller":"traceutil/trace.go:171","msg":"trace[1279599202] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:1860; }","duration":"108.076285ms","start":"2024-08-15T00:09:19.958140Z","end":"2024-08-15T00:09:20.066216Z","steps":["trace[1279599202] 'range keys from in-memory index tree' (duration: 107.949119ms)"],"step_count":1}
==> kernel <==
00:11:31 up 1:53, 0 users, load average: 0.15, 0.48, 0.31
Linux addons-877132 5.15.0-1066-gcp #74~20.04.1-Ubuntu SMP Fri Jul 26 09:28:41 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kindnet [17f6bd6dd22c5288ff6a4dc156ad4dd1d32d2895bfc8398e63705bc5613cf677] <==
E0815 00:10:21.064434 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "pods" in API group "" at the cluster scope
W0815 00:10:22.469985 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
E0815 00:10:22.470022 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
I0815 00:10:23.555752 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:10:23.555792 1 main.go:299] handling current node
I0815 00:10:33.555299 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:10:33.555337 1 main.go:299] handling current node
W0815 00:10:34.266809 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
E0815 00:10:34.266845 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.NetworkPolicy: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
I0815 00:10:43.555667 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:10:43.555710 1 main.go:299] handling current node
I0815 00:10:53.555221 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:10:53.555257 1 main.go:299] handling current node
W0815 00:11:00.296633 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "pods" in API group "" at the cluster scope
E0815 00:11:00.296662 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "pods" in API group "" at the cluster scope
I0815 00:11:03.555385 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:11:03.555416 1 main.go:299] handling current node
W0815 00:11:07.457910 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
E0815 00:11:07.457947 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.NetworkPolicy: failed to list *v1.NetworkPolicy: networkpolicies.networking.k8s.io is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "networkpolicies" in API group "networking.k8s.io" at the cluster scope
I0815 00:11:13.556053 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:11:13.556086 1 main.go:299] handling current node
W0815 00:11:14.562989 1 reflector.go:547] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
E0815 00:11:14.563021 1 reflector.go:150] pkg/mod/k8s.io/client-go@v0.30.3/tools/cache/reflector.go:232: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:serviceaccount:kube-system:kindnet" cannot list resource "namespaces" in API group "" at the cluster scope
I0815 00:11:23.556046 1 main.go:295] Handling node with IPs: map[192.168.49.2:{}]
I0815 00:11:23.556083 1 main.go:299] handling current node
==> kube-apiserver [ea70e9f2778e6bfd0482547ef4e30fb6b1e37e3161a709b7913a069a0d6c1249] <==
I0815 00:08:04.597292 1 handler.go:286] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
E0815 00:08:22.099434 1 conn.go:339] Error on socket receive: read tcp 192.168.49.2:8443->192.168.49.1:55458: use of closed network connection
E0815 00:08:22.246304 1 conn.go:339] Error on socket receive: read tcp 192.168.49.2:8443->192.168.49.1:55482: use of closed network connection
E0815 00:08:50.694888 1 upgradeaware.go:427] Error proxying data from client to backend: read tcp 192.168.49.2:8443->10.244.0.28:40444: read: connection reset by peer
I0815 00:08:53.212855 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.106.166.95"}
I0815 00:08:55.861522 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0815 00:08:56.104430 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0815 00:08:57.263455 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0815 00:09:01.545972 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0815 00:09:01.698738 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.105.145.87"}
I0815 00:09:30.366936 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0815 00:09:30.366998 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0815 00:09:30.378927 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0815 00:09:30.378969 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0815 00:09:30.382001 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0815 00:09:30.382053 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0815 00:09:30.391883 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0815 00:09:30.392003 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0815 00:09:30.498706 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0815 00:09:30.498743 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0815 00:09:31.382946 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0815 00:09:31.499329 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0815 00:09:31.509054 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0815 00:11:20.893935 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.109.70.207"}
E0815 00:11:22.915796 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"ingress-nginx\" not found]"
==> kube-controller-manager [4043a5cc95e0b642f3d43fc29910c36484e438e6f5eb9296ff38dcbd460cb280] <==
W0815 00:10:08.501409 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:08.501443 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0815 00:10:10.708192 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:10.708230 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0815 00:10:23.114171 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:23.114208 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0815 00:10:44.561961 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:44.562001 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0815 00:10:44.792049 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:44.792094 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0815 00:10:49.815010 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:49.815050 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0815 00:10:58.447804 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:10:58.447889 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0815 00:11:20.696942 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="12.54421ms"
I0815 00:11:20.700098 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="3.110621ms"
I0815 00:11:20.700220 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="75.383µs"
I0815 00:11:20.703288 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="34.186µs"
I0815 00:11:22.386057 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="5.560534ms"
I0815 00:11:22.386153 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="45.056µs"
I0815 00:11:22.882165 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create" delay="0s"
I0815 00:11:22.883571 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-7559cbf597" duration="9.901µs"
I0815 00:11:22.885926 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch" delay="0s"
W0815 00:11:29.316553 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0815 00:11:29.316612 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
==> kube-proxy [e5fd37ee5ee48f313a2bbb8de2bf49949a8ca143f909bf33e1d7a3ca648839a1] <==
I0815 00:06:08.771141 1 server_linux.go:66] "Using iptables proxy"
I0815 00:06:09.677856 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0815 00:06:09.677943 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0815 00:06:10.256713 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0815 00:06:10.256859 1 server_linux.go:169] "Using iptables Proxier"
I0815 00:06:10.265814 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0815 00:06:10.266511 1 server.go:483] "Version info" version="v1.31.0"
I0815 00:06:10.266784 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0815 00:06:10.268405 1 config.go:197] "Starting service config controller"
I0815 00:06:10.269673 1 shared_informer.go:313] Waiting for caches to sync for service config
I0815 00:06:10.269385 1 config.go:326] "Starting node config controller"
I0815 00:06:10.269842 1 shared_informer.go:313] Waiting for caches to sync for node config
I0815 00:06:10.268940 1 config.go:104] "Starting endpoint slice config controller"
I0815 00:06:10.269934 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0815 00:06:10.370849 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0815 00:06:10.373009 1 shared_informer.go:320] Caches are synced for service config
I0815 00:06:10.373024 1 shared_informer.go:320] Caches are synced for node config
==> kube-scheduler [bd77b5ecfadb9e4b0eb677138b067975344fac163e26352db7d5ce14d50ed8f0] <==
W0815 00:05:58.063472 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0815 00:05:58.063858 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0815 00:05:58.063491 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0815 00:05:58.063889 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0815 00:05:58.063534 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0815 00:05:58.063914 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0815 00:05:58.063535 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0815 00:05:58.063935 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0815 00:05:58.063800 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0815 00:05:58.063952 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0815 00:05:58.984099 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0815 00:05:58.984141 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0815 00:05:58.991354 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0815 00:05:58.991389 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0815 00:05:59.015615 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0815 00:05:59.015655 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0815 00:05:59.046949 1 reflector.go:561] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0815 00:05:59.046980 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0815 00:05:59.062581 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0815 00:05:59.062629 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0815 00:05:59.078903 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0815 00:05:59.078935 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0815 00:05:59.094087 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0815 00:05:59.094126 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
I0815 00:06:01.762374 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Aug 15 00:11:20 addons-877132 kubelet[1633]: I0815 00:11:20.858072 1633 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlq5j\" (UniqueName: \"kubernetes.io/projected/c01330a3-4fe5-40e7-ba66-04fc92ff0e44-kube-api-access-jlq5j\") pod \"hello-world-app-55bf9c44b4-jw59v\" (UID: \"c01330a3-4fe5-40e7-ba66-04fc92ff0e44\") " pod="default/hello-world-app-55bf9c44b4-jw59v"
Aug 15 00:11:21 addons-877132 kubelet[1633]: I0815 00:11:21.767339 1633 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzsmp\" (UniqueName: \"kubernetes.io/projected/a8fc2d7b-0cd2-425b-a632-15debd9dd0c7-kube-api-access-tzsmp\") pod \"a8fc2d7b-0cd2-425b-a632-15debd9dd0c7\" (UID: \"a8fc2d7b-0cd2-425b-a632-15debd9dd0c7\") "
Aug 15 00:11:21 addons-877132 kubelet[1633]: I0815 00:11:21.768992 1633 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8fc2d7b-0cd2-425b-a632-15debd9dd0c7-kube-api-access-tzsmp" (OuterVolumeSpecName: "kube-api-access-tzsmp") pod "a8fc2d7b-0cd2-425b-a632-15debd9dd0c7" (UID: "a8fc2d7b-0cd2-425b-a632-15debd9dd0c7"). InnerVolumeSpecName "kube-api-access-tzsmp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Aug 15 00:11:21 addons-877132 kubelet[1633]: I0815 00:11:21.868311 1633 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-tzsmp\" (UniqueName: \"kubernetes.io/projected/a8fc2d7b-0cd2-425b-a632-15debd9dd0c7-kube-api-access-tzsmp\") on node \"addons-877132\" DevicePath \"\""
Aug 15 00:11:22 addons-877132 kubelet[1633]: I0815 00:11:22.368372 1633 scope.go:117] "RemoveContainer" containerID="4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9"
Aug 15 00:11:22 addons-877132 kubelet[1633]: I0815 00:11:22.382992 1633 scope.go:117] "RemoveContainer" containerID="4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9"
Aug 15 00:11:22 addons-877132 kubelet[1633]: E0815 00:11:22.383345 1633 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9\": container with ID starting with 4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9 not found: ID does not exist" containerID="4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9"
Aug 15 00:11:22 addons-877132 kubelet[1633]: I0815 00:11:22.383390 1633 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9"} err="failed to get container status \"4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9\": rpc error: code = NotFound desc = could not find container \"4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9\": container with ID starting with 4303733cb1643a6d48d8e2e86b379d2610f3e730ca3c15256d2fdef5280face9 not found: ID does not exist"
Aug 15 00:11:22 addons-877132 kubelet[1633]: I0815 00:11:22.389264 1633 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/hello-world-app-55bf9c44b4-jw59v" podStartSLOduration=1.791431929 podStartE2EDuration="2.389247938s" podCreationTimestamp="2024-08-15 00:11:20 +0000 UTC" firstStartedPulling="2024-08-15 00:11:21.068385828 +0000 UTC m=+321.107886961" lastFinishedPulling="2024-08-15 00:11:21.666201826 +0000 UTC m=+321.705702970" observedRunningTime="2024-08-15 00:11:22.380800187 +0000 UTC m=+322.420301349" watchObservedRunningTime="2024-08-15 00:11:22.389247938 +0000 UTC m=+322.428749083"
Aug 15 00:11:24 addons-877132 kubelet[1633]: I0815 00:11:24.072102 1633 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cea3c86-47b9-446f-89d1-bb847ee9969a" path="/var/lib/kubelet/pods/0cea3c86-47b9-446f-89d1-bb847ee9969a/volumes"
Aug 15 00:11:24 addons-877132 kubelet[1633]: I0815 00:11:24.072461 1633 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56137687-fa9f-4135-8b93-22b9f97e61ad" path="/var/lib/kubelet/pods/56137687-fa9f-4135-8b93-22b9f97e61ad/volumes"
Aug 15 00:11:24 addons-877132 kubelet[1633]: I0815 00:11:24.072737 1633 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8fc2d7b-0cd2-425b-a632-15debd9dd0c7" path="/var/lib/kubelet/pods/a8fc2d7b-0cd2-425b-a632-15debd9dd0c7/volumes"
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.195955 1633 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twv6n\" (UniqueName: \"kubernetes.io/projected/382b8687-211c-483f-ae46-64db5d2c2738-kube-api-access-twv6n\") pod \"382b8687-211c-483f-ae46-64db5d2c2738\" (UID: \"382b8687-211c-483f-ae46-64db5d2c2738\") "
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.195999 1633 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/382b8687-211c-483f-ae46-64db5d2c2738-webhook-cert\") pod \"382b8687-211c-483f-ae46-64db5d2c2738\" (UID: \"382b8687-211c-483f-ae46-64db5d2c2738\") "
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.197660 1633 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/382b8687-211c-483f-ae46-64db5d2c2738-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "382b8687-211c-483f-ae46-64db5d2c2738" (UID: "382b8687-211c-483f-ae46-64db5d2c2738"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.198115 1633 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/382b8687-211c-483f-ae46-64db5d2c2738-kube-api-access-twv6n" (OuterVolumeSpecName: "kube-api-access-twv6n") pod "382b8687-211c-483f-ae46-64db5d2c2738" (UID: "382b8687-211c-483f-ae46-64db5d2c2738"). InnerVolumeSpecName "kube-api-access-twv6n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.296381 1633 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-twv6n\" (UniqueName: \"kubernetes.io/projected/382b8687-211c-483f-ae46-64db5d2c2738-kube-api-access-twv6n\") on node \"addons-877132\" DevicePath \"\""
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.296417 1633 reconciler_common.go:288] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/382b8687-211c-483f-ae46-64db5d2c2738-webhook-cert\") on node \"addons-877132\" DevicePath \"\""
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.379158 1633 scope.go:117] "RemoveContainer" containerID="8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d"
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.392420 1633 scope.go:117] "RemoveContainer" containerID="8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d"
Aug 15 00:11:26 addons-877132 kubelet[1633]: E0815 00:11:26.392651 1633 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d\": container with ID starting with 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d not found: ID does not exist" containerID="8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d"
Aug 15 00:11:26 addons-877132 kubelet[1633]: I0815 00:11:26.392676 1633 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d"} err="failed to get container status \"8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d\": rpc error: code = NotFound desc = could not find container \"8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d\": container with ID starting with 8b6c013e33250c6bcb7d48fe79db0c35b0f196cc41a0c7b7457c198e31fee13d not found: ID does not exist"
Aug 15 00:11:28 addons-877132 kubelet[1633]: I0815 00:11:28.071758 1633 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="382b8687-211c-483f-ae46-64db5d2c2738" path="/var/lib/kubelet/pods/382b8687-211c-483f-ae46-64db5d2c2738/volumes"
Aug 15 00:11:30 addons-877132 kubelet[1633]: E0815 00:11:30.311237 1633 eviction_manager.go:257] "Eviction manager: failed to get HasDedicatedImageFs" err="missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1723680690311018129,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:613264,},InodesUsed:&UInt64Value{Value:247,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
Aug 15 00:11:30 addons-877132 kubelet[1633]: E0815 00:11:30.311269 1633 eviction_manager.go:212] "Eviction manager: failed to synchronize" err="eviction manager: failed to get HasDedicatedImageFs: missing image stats: &ImageFsInfoResponse{ImageFilesystems:[]*FilesystemUsage{&FilesystemUsage{Timestamp:1723680690311018129,FsId:&FilesystemIdentifier{Mountpoint:/var/lib/containers/storage/overlay-images,},UsedBytes:&UInt64Value{Value:613264,},InodesUsed:&UInt64Value{Value:247,},},},ContainerFilesystems:[]*FilesystemUsage{},}"
==> storage-provisioner [03e7fb303164d2ef427adb835d31e224473b8f74e6cf70ad41f8bf76d02c9292] <==
I0815 00:06:24.994803 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0815 00:06:25.003166 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0815 00:06:25.003199 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0815 00:06:25.009325 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0815 00:06:25.009364 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"7744846a-ef9e-42e9-90e6-1e26a8341167", APIVersion:"v1", ResourceVersion:"943", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-877132_ee44d2d3-4c44-4a23-bef6-1d5ee9ac4c4c became leader
I0815 00:06:25.009462 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-877132_ee44d2d3-4c44-4a23-bef6-1d5ee9ac4c4c!
I0815 00:06:25.110558 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-877132_ee44d2d3-4c44-4a23-bef6-1d5ee9ac4c4c!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-877132 -n addons-877132
helpers_test.go:261: (dbg) Run: kubectl --context addons-877132 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (150.46s)