=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:209: (dbg) Run: kubectl --context addons-134601 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:234: (dbg) Run: kubectl --context addons-134601 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:247: (dbg) Run: kubectl --context addons-134601 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [a36d1bf1-332f-4b0c-ad12-aa1c7e879d9b] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [a36d1bf1-332f-4b0c-ad12-aa1c7e879d9b] Running
addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 8.003525526s
addons_test.go:264: (dbg) Run: out/minikube-linux-arm64 -p addons-134601 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:288: (dbg) Run: kubectl --context addons-134601 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:293: (dbg) Run: out/minikube-linux-arm64 -p addons-134601 ip
addons_test.go:299: (dbg) Run: nslookup hello-john.test 192.168.49.2
addons_test.go:299: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.060737817s)
-- stdout --
;; connection timed out; no servers could be reached
-- /stdout --
addons_test.go:301: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:305: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached
stderr:
addons_test.go:308: (dbg) Run: out/minikube-linux-arm64 -p addons-134601 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:308: (dbg) Done: out/minikube-linux-arm64 -p addons-134601 addons disable ingress-dns --alsologtostderr -v=1: (1.146268131s)
addons_test.go:313: (dbg) Run: out/minikube-linux-arm64 -p addons-134601 addons disable ingress --alsologtostderr -v=1
addons_test.go:313: (dbg) Done: out/minikube-linux-arm64 -p addons-134601 addons disable ingress --alsologtostderr -v=1: (7.735286876s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-134601
helpers_test.go:235: (dbg) docker inspect addons-134601:
-- stdout --
[
{
"Id": "b66644773df38e1f944cd9afaef13e4e0a73693afd12db12fa91abe1e2ad43e4",
"Created": "2024-06-17T11:36:20.171228922Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 692357,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-06-17T11:36:20.466971903Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:d36081176f43c9443534fbd23d834d14507b037430e066481145283247762ade",
"ResolvConfPath": "/var/lib/docker/containers/b66644773df38e1f944cd9afaef13e4e0a73693afd12db12fa91abe1e2ad43e4/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/b66644773df38e1f944cd9afaef13e4e0a73693afd12db12fa91abe1e2ad43e4/hostname",
"HostsPath": "/var/lib/docker/containers/b66644773df38e1f944cd9afaef13e4e0a73693afd12db12fa91abe1e2ad43e4/hosts",
"LogPath": "/var/lib/docker/containers/b66644773df38e1f944cd9afaef13e4e0a73693afd12db12fa91abe1e2ad43e4/b66644773df38e1f944cd9afaef13e4e0a73693afd12db12fa91abe1e2ad43e4-json.log",
"Name": "/addons-134601",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-134601:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-134601",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/6792451e58e7e46a9a1769026ff397eb6b738b9a078679877aa89e3dad14a06e-init/diff:/var/lib/docker/overlay2/c07c2f412fc737ec224babdeaebc84a76c392761a424a81f6ee0a5caa5d8373f/diff",
"MergedDir": "/var/lib/docker/overlay2/6792451e58e7e46a9a1769026ff397eb6b738b9a078679877aa89e3dad14a06e/merged",
"UpperDir": "/var/lib/docker/overlay2/6792451e58e7e46a9a1769026ff397eb6b738b9a078679877aa89e3dad14a06e/diff",
"WorkDir": "/var/lib/docker/overlay2/6792451e58e7e46a9a1769026ff397eb6b738b9a078679877aa89e3dad14a06e/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-134601",
"Source": "/var/lib/docker/volumes/addons-134601/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-134601",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-134601",
"name.minikube.sigs.k8s.io": "addons-134601",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "5613be09e0f93124dfa47f8447c2e4db678798ab34d6d668314304667ee64086",
"SandboxKey": "/var/run/docker/netns/5613be09e0f9",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33537"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33536"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33533"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33535"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "33534"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-134601": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"NetworkID": "080dd99e834a11070ccfccd4e76755df6a40d19a84e7196555666583eb000a1d",
"EndpointID": "f268cf856b5e532f3bb436a09d8678af25a2a4c6efd7e43973316829ac9bfe98",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DriverOpts": null,
"DNSNames": [
"addons-134601",
"b66644773df3"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-134601 -n addons-134601
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p addons-134601 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-134601 logs -n 25: (1.401862546s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | -p download-only-994764 | download-only-994764 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | 17 Jun 24 11:35 UTC |
| delete | -p download-only-968605 | download-only-968605 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | 17 Jun 24 11:35 UTC |
| start | --download-only -p | download-docker-079460 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | |
| | download-docker-079460 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | -p download-docker-079460 | download-docker-079460 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | 17 Jun 24 11:35 UTC |
| start | --download-only -p | binary-mirror-788244 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | |
| | binary-mirror-788244 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:36423 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | -p binary-mirror-788244 | binary-mirror-788244 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | 17 Jun 24 11:35 UTC |
| addons | enable dashboard -p | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | |
| | addons-134601 | | | | | |
| addons | disable dashboard -p | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | |
| | addons-134601 | | | | | |
| start | -p addons-134601 --wait=true | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:35 UTC | 17 Jun 24 11:38 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| addons | enable headlamp | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:38 UTC | 17 Jun 24 11:38 UTC |
| | -p addons-134601 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-134601 ip | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:38 UTC | 17 Jun 24 11:38 UTC |
| addons | addons-134601 addons disable | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:38 UTC | 17 Jun 24 11:38 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:38 UTC | 17 Jun 24 11:38 UTC |
| | -p addons-134601 | | | | | |
| ssh | addons-134601 ssh cat | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:39 UTC | 17 Jun 24 11:39 UTC |
| | /opt/local-path-provisioner/pvc-cb684f52-f0cd-415f-a4e5-c14b80d7b47b_default_test-pvc/file1 | | | | | |
| addons | addons-134601 addons disable | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:39 UTC | 17 Jun 24 11:39 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:39 UTC | 17 Jun 24 11:39 UTC |
| | addons-134601 | | | | | |
| addons | addons-134601 addons | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:40 UTC | 17 Jun 24 11:40 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-134601 addons | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:40 UTC | 17 Jun 24 11:40 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:40 UTC | 17 Jun 24 11:40 UTC |
| | addons-134601 | | | | | |
| addons | addons-134601 addons | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:41 UTC | 17 Jun 24 11:41 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-134601 ssh curl -s | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:41 UTC | 17 Jun 24 11:41 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-134601 ip | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:41 UTC | 17 Jun 24 11:41 UTC |
| addons | addons-134601 addons disable | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:41 UTC | 17 Jun 24 11:41 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-134601 addons disable | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:41 UTC | 17 Jun 24 11:41 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-134601 addons disable | addons-134601 | jenkins | v1.33.1 | 17 Jun 24 11:41 UTC | 17 Jun 24 11:41 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/06/17 11:35:55
Running on machine: ip-172-31-30-239
Binary: Built with gc go1.22.3 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0617 11:35:55.192447 691879 out.go:291] Setting OutFile to fd 1 ...
I0617 11:35:55.192927 691879 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0617 11:35:55.192948 691879 out.go:304] Setting ErrFile to fd 2...
I0617 11:35:55.192954 691879 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0617 11:35:55.193286 691879 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19084-685849/.minikube/bin
I0617 11:35:55.193820 691879 out.go:298] Setting JSON to false
I0617 11:35:55.194912 691879 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":11903,"bootTime":1718612253,"procs":155,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1063-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
I0617 11:35:55.195026 691879 start.go:139] virtualization:
I0617 11:35:55.197506 691879 out.go:177] * [addons-134601] minikube v1.33.1 on Ubuntu 20.04 (arm64)
I0617 11:35:55.199793 691879 out.go:177] - MINIKUBE_LOCATION=19084
I0617 11:35:55.199837 691879 notify.go:220] Checking for updates...
I0617 11:35:55.202472 691879 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0617 11:35:55.204885 691879 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19084-685849/kubeconfig
I0617 11:35:55.206801 691879 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19084-685849/.minikube
I0617 11:35:55.208549 691879 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0617 11:35:55.210345 691879 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0617 11:35:55.212409 691879 driver.go:392] Setting default libvirt URI to qemu:///system
I0617 11:35:55.231465 691879 docker.go:122] docker version: linux-26.1.4:Docker Engine - Community
I0617 11:35:55.231585 691879 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0617 11:35:55.296292 691879 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:45 SystemTime:2024-06-17 11:35:55.287047442 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214892544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
I0617 11:35:55.296406 691879 docker.go:295] overlay module found
I0617 11:35:55.298656 691879 out.go:177] * Using the docker driver based on user configuration
I0617 11:35:55.300522 691879 start.go:297] selected driver: docker
I0617 11:35:55.300547 691879 start.go:901] validating driver "docker" against <nil>
I0617 11:35:55.300560 691879 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0617 11:35:55.301226 691879 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0617 11:35:55.354309 691879 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:29 OomKillDisable:true NGoroutines:45 SystemTime:2024-06-17 11:35:55.345320229 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1063-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aar
ch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214892544 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:26.1.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:d2d58213f83a351ca8f528a95fbd145f5654e957 Expected:d2d58213f83a351ca8f528a95fbd145f5654e957} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErro
rs:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.14.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.27.1]] Warnings:<nil>}}
I0617 11:35:55.354478 691879 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0617 11:35:55.354725 691879 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0617 11:35:55.356740 691879 out.go:177] * Using Docker driver with root privileges
I0617 11:35:55.358586 691879 cni.go:84] Creating CNI manager for ""
I0617 11:35:55.358610 691879 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0617 11:35:55.358621 691879 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I0617 11:35:55.358705 691879 start.go:340] cluster config:
{Name:addons-134601 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.1 ClusterName:addons-134601 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime
:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHA
uthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0617 11:35:55.361198 691879 out.go:177] * Starting "addons-134601" primary control-plane node in "addons-134601" cluster
I0617 11:35:55.363180 691879 cache.go:121] Beginning downloading kic base image for docker with containerd
I0617 11:35:55.365410 691879 out.go:177] * Pulling base image v0.0.44-1718296336-19068 ...
I0617 11:35:55.367161 691879 preload.go:132] Checking if preload exists for k8s version v1.30.1 and runtime containerd
I0617 11:35:55.367214 691879 preload.go:147] Found local preload: /home/jenkins/minikube-integration/19084-685849/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.1-containerd-overlay2-arm64.tar.lz4
I0617 11:35:55.367228 691879 cache.go:56] Caching tarball of preloaded images
I0617 11:35:55.367252 691879 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 in local docker daemon
I0617 11:35:55.367324 691879 preload.go:173] Found /home/jenkins/minikube-integration/19084-685849/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.1-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I0617 11:35:55.367334 691879 cache.go:59] Finished verifying existence of preloaded tar for v1.30.1 on containerd
I0617 11:35:55.367694 691879 profile.go:143] Saving config to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/config.json ...
I0617 11:35:55.367749 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/config.json: {Name:mkb08377f469fc1104db1717c4a08d3c7ada3e87 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:35:55.381897 691879 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 to local cache
I0617 11:35:55.382021 691879 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 in local cache directory
I0617 11:35:55.382044 691879 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 in local cache directory, skipping pull
I0617 11:35:55.382052 691879 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 exists in cache, skipping pull
I0617 11:35:55.382060 691879 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 as a tarball
I0617 11:35:55.382067 691879 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 from local cache
I0617 11:36:12.587150 691879 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 from cached tarball
I0617 11:36:12.587192 691879 cache.go:194] Successfully downloaded all kic artifacts
I0617 11:36:12.587234 691879 start.go:360] acquireMachinesLock for addons-134601: {Name:mk09459f563f6a675c00ab1b69c0357e1205feb9 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0617 11:36:12.587359 691879 start.go:364] duration metric: took 101.75µs to acquireMachinesLock for "addons-134601"
I0617 11:36:12.587389 691879 start.go:93] Provisioning new machine with config: &{Name:addons-134601 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.1 ClusterName:addons-134601 Namespace:default APIServerHAVIP: APIServerName:min
ikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.30.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:fa
lse CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.30.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0617 11:36:12.587531 691879 start.go:125] createHost starting for "" (driver="docker")
I0617 11:36:12.589912 691879 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0617 11:36:12.590182 691879 start.go:159] libmachine.API.Create for "addons-134601" (driver="docker")
I0617 11:36:12.590216 691879 client.go:168] LocalClient.Create starting
I0617 11:36:12.590322 691879 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca.pem
I0617 11:36:13.676399 691879 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/cert.pem
I0617 11:36:14.290096 691879 cli_runner.go:164] Run: docker network inspect addons-134601 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0617 11:36:14.305139 691879 cli_runner.go:211] docker network inspect addons-134601 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0617 11:36:14.305221 691879 network_create.go:281] running [docker network inspect addons-134601] to gather additional debugging logs...
I0617 11:36:14.305243 691879 cli_runner.go:164] Run: docker network inspect addons-134601
W0617 11:36:14.319894 691879 cli_runner.go:211] docker network inspect addons-134601 returned with exit code 1
I0617 11:36:14.319927 691879 network_create.go:284] error running [docker network inspect addons-134601]: docker network inspect addons-134601: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-134601 not found
I0617 11:36:14.319941 691879 network_create.go:286] output of [docker network inspect addons-134601]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-134601 not found
** /stderr **
I0617 11:36:14.320055 691879 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0617 11:36:14.334621 691879 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40004e0010}
I0617 11:36:14.334663 691879 network_create.go:124] attempt to create docker network addons-134601 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0617 11:36:14.334721 691879 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-134601 addons-134601
I0617 11:36:14.408328 691879 network_create.go:108] docker network addons-134601 192.168.49.0/24 created
I0617 11:36:14.408359 691879 kic.go:121] calculated static IP "192.168.49.2" for the "addons-134601" container
I0617 11:36:14.408437 691879 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0617 11:36:14.422874 691879 cli_runner.go:164] Run: docker volume create addons-134601 --label name.minikube.sigs.k8s.io=addons-134601 --label created_by.minikube.sigs.k8s.io=true
I0617 11:36:14.438772 691879 oci.go:103] Successfully created a docker volume addons-134601
I0617 11:36:14.438870 691879 cli_runner.go:164] Run: docker run --rm --name addons-134601-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-134601 --entrypoint /usr/bin/test -v addons-134601:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 -d /var/lib
I0617 11:36:15.949810 691879 cli_runner.go:217] Completed: docker run --rm --name addons-134601-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-134601 --entrypoint /usr/bin/test -v addons-134601:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 -d /var/lib: (1.510886406s)
I0617 11:36:15.949837 691879 oci.go:107] Successfully prepared a docker volume addons-134601
I0617 11:36:15.949861 691879 preload.go:132] Checking if preload exists for k8s version v1.30.1 and runtime containerd
I0617 11:36:15.949881 691879 kic.go:194] Starting extracting preloaded images to volume ...
I0617 11:36:15.949978 691879 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19084-685849/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.1-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-134601:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 -I lz4 -xf /preloaded.tar -C /extractDir
I0617 11:36:20.107495 691879 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19084-685849/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.30.1-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-134601:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 -I lz4 -xf /preloaded.tar -C /extractDir: (4.157472664s)
I0617 11:36:20.107533 691879 kic.go:203] duration metric: took 4.157647085s to extract preloaded images to volume ...
W0617 11:36:20.107715 691879 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0617 11:36:20.107836 691879 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0617 11:36:20.156985 691879 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-134601 --name addons-134601 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-134601 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-134601 --network addons-134601 --ip 192.168.49.2 --volume addons-134601:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8
I0617 11:36:20.474916 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Running}}
I0617 11:36:20.501230 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:20.522729 691879 cli_runner.go:164] Run: docker exec addons-134601 stat /var/lib/dpkg/alternatives/iptables
I0617 11:36:20.585999 691879 oci.go:144] the created container "addons-134601" has a running status.
I0617 11:36:20.586025 691879 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa...
I0617 11:36:20.998617 691879 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0617 11:36:21.045805 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:21.077013 691879 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0617 11:36:21.077031 691879 kic_runner.go:114] Args: [docker exec --privileged addons-134601 chown docker:docker /home/docker/.ssh/authorized_keys]
I0617 11:36:21.154204 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:21.178000 691879 machine.go:94] provisionDockerMachine start ...
I0617 11:36:21.178087 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:21.202309 691879 main.go:141] libmachine: Using SSH client type: native
I0617 11:36:21.202574 691879 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bb0] 0x3e5410 <nil> [] 0s} 127.0.0.1 33537 <nil> <nil>}
I0617 11:36:21.202584 691879 main.go:141] libmachine: About to run SSH command:
hostname
I0617 11:36:21.351164 691879 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-134601
I0617 11:36:21.351228 691879 ubuntu.go:169] provisioning hostname "addons-134601"
I0617 11:36:21.351321 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:21.373088 691879 main.go:141] libmachine: Using SSH client type: native
I0617 11:36:21.373323 691879 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bb0] 0x3e5410 <nil> [] 0s} 127.0.0.1 33537 <nil> <nil>}
I0617 11:36:21.373335 691879 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-134601 && echo "addons-134601" | sudo tee /etc/hostname
I0617 11:36:21.517641 691879 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-134601
I0617 11:36:21.517812 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:21.537281 691879 main.go:141] libmachine: Using SSH client type: native
I0617 11:36:21.537514 691879 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e2bb0] 0x3e5410 <nil> [] 0s} 127.0.0.1 33537 <nil> <nil>}
I0617 11:36:21.537530 691879 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-134601' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-134601/g' /etc/hosts;
else
echo '127.0.1.1 addons-134601' | sudo tee -a /etc/hosts;
fi
fi
I0617 11:36:21.671535 691879 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0617 11:36:21.671566 691879 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19084-685849/.minikube CaCertPath:/home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19084-685849/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19084-685849/.minikube}
I0617 11:36:21.671597 691879 ubuntu.go:177] setting up certificates
I0617 11:36:21.671607 691879 provision.go:84] configureAuth start
I0617 11:36:21.671672 691879 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-134601
I0617 11:36:21.687707 691879 provision.go:143] copyHostCerts
I0617 11:36:21.687803 691879 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19084-685849/.minikube/key.pem (1679 bytes)
I0617 11:36:21.687922 691879 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19084-685849/.minikube/ca.pem (1078 bytes)
I0617 11:36:21.687989 691879 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19084-685849/.minikube/cert.pem (1123 bytes)
I0617 11:36:21.688066 691879 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19084-685849/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca-key.pem org=jenkins.addons-134601 san=[127.0.0.1 192.168.49.2 addons-134601 localhost minikube]
I0617 11:36:22.826781 691879 provision.go:177] copyRemoteCerts
I0617 11:36:22.826872 691879 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0617 11:36:22.826915 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:22.844666 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:36:22.936111 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0617 11:36:22.959704 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0617 11:36:22.983379 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0617 11:36:23.008848 691879 provision.go:87] duration metric: took 1.337222833s to configureAuth
I0617 11:36:23.008876 691879 ubuntu.go:193] setting minikube options for container-runtime
I0617 11:36:23.009071 691879 config.go:182] Loaded profile config "addons-134601": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.30.1
I0617 11:36:23.009084 691879 machine.go:97] duration metric: took 1.83106832s to provisionDockerMachine
I0617 11:36:23.009092 691879 client.go:171] duration metric: took 10.418865321s to LocalClient.Create
I0617 11:36:23.009112 691879 start.go:167] duration metric: took 10.418930772s to libmachine.API.Create "addons-134601"
I0617 11:36:23.009122 691879 start.go:293] postStartSetup for "addons-134601" (driver="docker")
I0617 11:36:23.009132 691879 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0617 11:36:23.009195 691879 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0617 11:36:23.009253 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:23.025337 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:36:23.116380 691879 ssh_runner.go:195] Run: cat /etc/os-release
I0617 11:36:23.119505 691879 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0617 11:36:23.119545 691879 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0617 11:36:23.119581 691879 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0617 11:36:23.119590 691879 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0617 11:36:23.119601 691879 filesync.go:126] Scanning /home/jenkins/minikube-integration/19084-685849/.minikube/addons for local assets ...
I0617 11:36:23.119688 691879 filesync.go:126] Scanning /home/jenkins/minikube-integration/19084-685849/.minikube/files for local assets ...
I0617 11:36:23.119716 691879 start.go:296] duration metric: took 110.586908ms for postStartSetup
I0617 11:36:23.120037 691879 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-134601
I0617 11:36:23.135166 691879 profile.go:143] Saving config to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/config.json ...
I0617 11:36:23.135508 691879 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0617 11:36:23.135580 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:23.151365 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:36:23.240613 691879 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0617 11:36:23.245009 691879 start.go:128] duration metric: took 10.657462021s to createHost
I0617 11:36:23.245033 691879 start.go:83] releasing machines lock for "addons-134601", held for 10.657662484s
I0617 11:36:23.245103 691879 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-134601
I0617 11:36:23.263579 691879 ssh_runner.go:195] Run: cat /version.json
I0617 11:36:23.263629 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:23.263695 691879 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0617 11:36:23.263743 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:23.284903 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:36:23.287610 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:36:23.370750 691879 ssh_runner.go:195] Run: systemctl --version
I0617 11:36:23.494078 691879 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0617 11:36:23.498212 691879 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0617 11:36:23.522009 691879 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0617 11:36:23.522126 691879 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0617 11:36:23.549243 691879 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0617 11:36:23.549277 691879 start.go:494] detecting cgroup driver to use...
I0617 11:36:23.549327 691879 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0617 11:36:23.549403 691879 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0617 11:36:23.562005 691879 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0617 11:36:23.573234 691879 docker.go:217] disabling cri-docker service (if available) ...
I0617 11:36:23.573351 691879 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0617 11:36:23.587198 691879 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0617 11:36:23.601680 691879 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0617 11:36:23.683967 691879 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0617 11:36:23.771755 691879 docker.go:233] disabling docker service ...
I0617 11:36:23.771824 691879 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0617 11:36:23.792468 691879 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0617 11:36:23.804806 691879 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0617 11:36:23.895367 691879 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0617 11:36:23.997727 691879 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0617 11:36:24.011850 691879 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0617 11:36:24.032872 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I0617 11:36:24.045213 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0617 11:36:24.056856 691879 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0617 11:36:24.056933 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0617 11:36:24.068842 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0617 11:36:24.080200 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0617 11:36:24.092330 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0617 11:36:24.102858 691879 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0617 11:36:24.113320 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0617 11:36:24.125495 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0617 11:36:24.136219 691879 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0617 11:36:24.146430 691879 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0617 11:36:24.155889 691879 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0617 11:36:24.164764 691879 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0617 11:36:24.249627 691879 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0617 11:36:24.374905 691879 start.go:541] Will wait 60s for socket path /run/containerd/containerd.sock
I0617 11:36:24.374993 691879 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0617 11:36:24.378596 691879 start.go:562] Will wait 60s for crictl version
I0617 11:36:24.378700 691879 ssh_runner.go:195] Run: which crictl
I0617 11:36:24.381951 691879 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0617 11:36:24.425400 691879 start.go:578] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.6.33
RuntimeApiVersion: v1
I0617 11:36:24.425539 691879 ssh_runner.go:195] Run: containerd --version
I0617 11:36:24.447065 691879 ssh_runner.go:195] Run: containerd --version
I0617 11:36:24.471612 691879 out.go:177] * Preparing Kubernetes v1.30.1 on containerd 1.6.33 ...
I0617 11:36:24.473460 691879 cli_runner.go:164] Run: docker network inspect addons-134601 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0617 11:36:24.490521 691879 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0617 11:36:24.494144 691879 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0617 11:36:24.505074 691879 kubeadm.go:877] updating cluster {Name:addons-134601 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.1 ClusterName:addons-134601 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Cus
tomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0617 11:36:24.505196 691879 preload.go:132] Checking if preload exists for k8s version v1.30.1 and runtime containerd
I0617 11:36:24.505262 691879 ssh_runner.go:195] Run: sudo crictl images --output json
I0617 11:36:24.540893 691879 containerd.go:627] all images are preloaded for containerd runtime.
I0617 11:36:24.540917 691879 containerd.go:534] Images already preloaded, skipping extraction
I0617 11:36:24.540980 691879 ssh_runner.go:195] Run: sudo crictl images --output json
I0617 11:36:24.578255 691879 containerd.go:627] all images are preloaded for containerd runtime.
I0617 11:36:24.578275 691879 cache_images.go:84] Images are preloaded, skipping loading
I0617 11:36:24.578282 691879 kubeadm.go:928] updating node { 192.168.49.2 8443 v1.30.1 containerd true true} ...
I0617 11:36:24.578373 691879 kubeadm.go:940] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.30.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-134601 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.30.1 ClusterName:addons-134601 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0617 11:36:24.578434 691879 ssh_runner.go:195] Run: sudo crictl info
I0617 11:36:24.616885 691879 cni.go:84] Creating CNI manager for ""
I0617 11:36:24.616908 691879 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0617 11:36:24.616918 691879 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0617 11:36:24.616940 691879 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.30.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-134601 NodeName:addons-134601 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc
/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0617 11:36:24.617071 691879 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "addons-134601"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.30.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%!"(MISSING)
nodefs.inodesFree: "0%!"(MISSING)
imagefs.available: "0%!"(MISSING)
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0617 11:36:24.617144 691879 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.30.1
I0617 11:36:24.625700 691879 binaries.go:44] Found k8s binaries, skipping transfer
I0617 11:36:24.625770 691879 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0617 11:36:24.633970 691879 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0617 11:36:24.651383 691879 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0617 11:36:24.669081 691879 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2167 bytes)
I0617 11:36:24.686945 691879 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0617 11:36:24.690264 691879 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0617 11:36:24.700746 691879 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0617 11:36:24.794190 691879 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0617 11:36:24.809633 691879 certs.go:68] Setting up /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601 for IP: 192.168.49.2
I0617 11:36:24.809652 691879 certs.go:194] generating shared ca certs ...
I0617 11:36:24.809668 691879 certs.go:226] acquiring lock for ca certs: {Name:mkd182a8d082c6d0615c99aed3d4d2e0a9bb102c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:24.809799 691879 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19084-685849/.minikube/ca.key
I0617 11:36:25.430101 691879 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19084-685849/.minikube/ca.crt ...
I0617 11:36:25.430133 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/ca.crt: {Name:mk79daf2140a9ebb346032ad8180e82fd0c9bae1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:25.430329 691879 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19084-685849/.minikube/ca.key ...
I0617 11:36:25.430342 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/ca.key: {Name:mka740ce75a647c0cf0eb6706d7fda02adc3099f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:25.430439 691879 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.key
I0617 11:36:25.823743 691879 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.crt ...
I0617 11:36:25.823778 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.crt: {Name:mk95474792d01340dba5cc7fe955e2cd54718da2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:25.824477 691879 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.key ...
I0617 11:36:25.824495 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.key: {Name:mk8472fd58e828c410e3a46367411ca5816b8527 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:25.824677 691879 certs.go:256] generating profile certs ...
I0617 11:36:25.824752 691879 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/client.key
I0617 11:36:25.824770 691879 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/client.crt with IP's: []
I0617 11:36:26.529958 691879 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/client.crt ...
I0617 11:36:26.529993 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/client.crt: {Name:mkc844e9367d9a17363f31eb1cb61e5983015611 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:26.530231 691879 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/client.key ...
I0617 11:36:26.530246 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/client.key: {Name:mk6dfd46435f0ed78e07ca6d50b69de5d410d3d1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:26.530343 691879 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.key.1f377a19
I0617 11:36:26.530362 691879 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.crt.1f377a19 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0617 11:36:26.815156 691879 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.crt.1f377a19 ...
I0617 11:36:26.815187 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.crt.1f377a19: {Name:mk32703b2dafc2b81a859e4fb4b3164a3f15b1ae Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:26.815367 691879 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.key.1f377a19 ...
I0617 11:36:26.815382 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.key.1f377a19: {Name:mk32df492615f149ef55cadad21499968c388de2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:26.815489 691879 certs.go:381] copying /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.crt.1f377a19 -> /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.crt
I0617 11:36:26.815574 691879 certs.go:385] copying /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.key.1f377a19 -> /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.key
I0617 11:36:26.815628 691879 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.key
I0617 11:36:26.815648 691879 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.crt with IP's: []
I0617 11:36:27.049684 691879 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.crt ...
I0617 11:36:27.049724 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.crt: {Name:mkca9b1d6c742d596941f17d555b795b0bf66f9f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:27.049934 691879 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.key ...
I0617 11:36:27.049952 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.key: {Name:mk8912b6ff549d26c2625eeea60b702619a66016 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:27.050152 691879 certs.go:484] found cert: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca-key.pem (1679 bytes)
I0617 11:36:27.050201 691879 certs.go:484] found cert: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/ca.pem (1078 bytes)
I0617 11:36:27.050233 691879 certs.go:484] found cert: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/cert.pem (1123 bytes)
I0617 11:36:27.050262 691879 certs.go:484] found cert: /home/jenkins/minikube-integration/19084-685849/.minikube/certs/key.pem (1679 bytes)
I0617 11:36:27.050865 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0617 11:36:27.077592 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0617 11:36:27.104014 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0617 11:36:27.133588 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0617 11:36:27.162301 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0617 11:36:27.189363 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0617 11:36:27.215174 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0617 11:36:27.239775 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/profiles/addons-134601/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0617 11:36:27.264273 691879 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19084-685849/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0617 11:36:27.288462 691879 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0617 11:36:27.305806 691879 ssh_runner.go:195] Run: openssl version
I0617 11:36:27.311149 691879 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0617 11:36:27.320636 691879 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0617 11:36:27.323961 691879 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Jun 17 11:36 /usr/share/ca-certificates/minikubeCA.pem
I0617 11:36:27.324021 691879 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0617 11:36:27.330665 691879 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0617 11:36:27.340231 691879 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0617 11:36:27.343380 691879 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0617 11:36:27.343443 691879 kubeadm.go:391] StartCluster: {Name:addons-134601 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.44-1718296336-19068@sha256:b31b1f456eebc10b590403d2cc052bb20a70156f4629e3514cbb38ecd550e2c8 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.30.1 ClusterName:addons-134601 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.1 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Custom
QemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0617 11:36:27.343526 691879 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0617 11:36:27.343584 691879 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0617 11:36:27.380764 691879 cri.go:89] found id: ""
I0617 11:36:27.380836 691879 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0617 11:36:27.389537 691879 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0617 11:36:27.398169 691879 kubeadm.go:213] ignoring SystemVerification for kubeadm because of docker driver
I0617 11:36:27.398232 691879 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0617 11:36:27.406806 691879 kubeadm.go:154] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0617 11:36:27.406829 691879 kubeadm.go:156] found existing configuration files:
I0617 11:36:27.406899 691879 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0617 11:36:27.415599 691879 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0617 11:36:27.415660 691879 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0617 11:36:27.424049 691879 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0617 11:36:27.432797 691879 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0617 11:36:27.432871 691879 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0617 11:36:27.441104 691879 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0617 11:36:27.449676 691879 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0617 11:36:27.449737 691879 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0617 11:36:27.457895 691879 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0617 11:36:27.466488 691879 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0617 11:36:27.466555 691879 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0617 11:36:27.474979 691879 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.30.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0617 11:36:27.559011 691879 kubeadm.go:309] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1063-aws\n", err: exit status 1
I0617 11:36:27.628718 691879 kubeadm.go:309] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0617 11:36:46.483256 691879 kubeadm.go:309] [init] Using Kubernetes version: v1.30.1
I0617 11:36:46.483315 691879 kubeadm.go:309] [preflight] Running pre-flight checks
I0617 11:36:46.483399 691879 kubeadm.go:309] [preflight] The system verification failed. Printing the output from the verification:
I0617 11:36:46.483476 691879 kubeadm.go:309] KERNEL_VERSION: 5.15.0-1063-aws
I0617 11:36:46.483512 691879 kubeadm.go:309] OS: Linux
I0617 11:36:46.483565 691879 kubeadm.go:309] CGROUPS_CPU: enabled
I0617 11:36:46.483616 691879 kubeadm.go:309] CGROUPS_CPUACCT: enabled
I0617 11:36:46.483665 691879 kubeadm.go:309] CGROUPS_CPUSET: enabled
I0617 11:36:46.483716 691879 kubeadm.go:309] CGROUPS_DEVICES: enabled
I0617 11:36:46.483764 691879 kubeadm.go:309] CGROUPS_FREEZER: enabled
I0617 11:36:46.483816 691879 kubeadm.go:309] CGROUPS_MEMORY: enabled
I0617 11:36:46.483863 691879 kubeadm.go:309] CGROUPS_PIDS: enabled
I0617 11:36:46.483913 691879 kubeadm.go:309] CGROUPS_HUGETLB: enabled
I0617 11:36:46.483960 691879 kubeadm.go:309] CGROUPS_BLKIO: enabled
I0617 11:36:46.484033 691879 kubeadm.go:309] [preflight] Pulling images required for setting up a Kubernetes cluster
I0617 11:36:46.484127 691879 kubeadm.go:309] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0617 11:36:46.484220 691879 kubeadm.go:309] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0617 11:36:46.484284 691879 kubeadm.go:309] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0617 11:36:46.486249 691879 out.go:204] - Generating certificates and keys ...
I0617 11:36:46.486338 691879 kubeadm.go:309] [certs] Using existing ca certificate authority
I0617 11:36:46.486406 691879 kubeadm.go:309] [certs] Using existing apiserver certificate and key on disk
I0617 11:36:46.486474 691879 kubeadm.go:309] [certs] Generating "apiserver-kubelet-client" certificate and key
I0617 11:36:46.486534 691879 kubeadm.go:309] [certs] Generating "front-proxy-ca" certificate and key
I0617 11:36:46.486597 691879 kubeadm.go:309] [certs] Generating "front-proxy-client" certificate and key
I0617 11:36:46.486649 691879 kubeadm.go:309] [certs] Generating "etcd/ca" certificate and key
I0617 11:36:46.486707 691879 kubeadm.go:309] [certs] Generating "etcd/server" certificate and key
I0617 11:36:46.486823 691879 kubeadm.go:309] [certs] etcd/server serving cert is signed for DNS names [addons-134601 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0617 11:36:46.486883 691879 kubeadm.go:309] [certs] Generating "etcd/peer" certificate and key
I0617 11:36:46.487000 691879 kubeadm.go:309] [certs] etcd/peer serving cert is signed for DNS names [addons-134601 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0617 11:36:46.487068 691879 kubeadm.go:309] [certs] Generating "etcd/healthcheck-client" certificate and key
I0617 11:36:46.487134 691879 kubeadm.go:309] [certs] Generating "apiserver-etcd-client" certificate and key
I0617 11:36:46.487181 691879 kubeadm.go:309] [certs] Generating "sa" key and public key
I0617 11:36:46.487240 691879 kubeadm.go:309] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0617 11:36:46.487292 691879 kubeadm.go:309] [kubeconfig] Writing "admin.conf" kubeconfig file
I0617 11:36:46.487350 691879 kubeadm.go:309] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0617 11:36:46.487404 691879 kubeadm.go:309] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0617 11:36:46.487513 691879 kubeadm.go:309] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0617 11:36:46.487582 691879 kubeadm.go:309] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0617 11:36:46.487680 691879 kubeadm.go:309] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0617 11:36:46.487763 691879 kubeadm.go:309] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0617 11:36:46.489639 691879 out.go:204] - Booting up control plane ...
I0617 11:36:46.489747 691879 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0617 11:36:46.489849 691879 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0617 11:36:46.489930 691879 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0617 11:36:46.490038 691879 kubeadm.go:309] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0617 11:36:46.490133 691879 kubeadm.go:309] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0617 11:36:46.490179 691879 kubeadm.go:309] [kubelet-start] Starting the kubelet
I0617 11:36:46.490332 691879 kubeadm.go:309] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0617 11:36:46.490415 691879 kubeadm.go:309] [kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s
I0617 11:36:46.490472 691879 kubeadm.go:309] [kubelet-check] The kubelet is healthy after 2.501929769s
I0617 11:36:46.490539 691879 kubeadm.go:309] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0617 11:36:46.490595 691879 kubeadm.go:309] [api-check] The API server is healthy after 6.00186563s
I0617 11:36:46.490696 691879 kubeadm.go:309] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0617 11:36:46.490815 691879 kubeadm.go:309] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0617 11:36:46.490879 691879 kubeadm.go:309] [upload-certs] Skipping phase. Please see --upload-certs
I0617 11:36:46.491054 691879 kubeadm.go:309] [mark-control-plane] Marking the node addons-134601 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0617 11:36:46.491108 691879 kubeadm.go:309] [bootstrap-token] Using token: yrsv16.mt35ve1y9ihhwy28
I0617 11:36:46.493000 691879 out.go:204] - Configuring RBAC rules ...
I0617 11:36:46.493117 691879 kubeadm.go:309] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0617 11:36:46.493208 691879 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0617 11:36:46.493348 691879 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0617 11:36:46.493490 691879 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0617 11:36:46.493605 691879 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0617 11:36:46.493690 691879 kubeadm.go:309] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0617 11:36:46.493812 691879 kubeadm.go:309] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0617 11:36:46.493859 691879 kubeadm.go:309] [addons] Applied essential addon: CoreDNS
I0617 11:36:46.493903 691879 kubeadm.go:309] [addons] Applied essential addon: kube-proxy
I0617 11:36:46.493907 691879 kubeadm.go:309]
I0617 11:36:46.493965 691879 kubeadm.go:309] Your Kubernetes control-plane has initialized successfully!
I0617 11:36:46.493968 691879 kubeadm.go:309]
I0617 11:36:46.494044 691879 kubeadm.go:309] To start using your cluster, you need to run the following as a regular user:
I0617 11:36:46.494048 691879 kubeadm.go:309]
I0617 11:36:46.494073 691879 kubeadm.go:309] mkdir -p $HOME/.kube
I0617 11:36:46.494129 691879 kubeadm.go:309] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0617 11:36:46.494178 691879 kubeadm.go:309] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0617 11:36:46.494181 691879 kubeadm.go:309]
I0617 11:36:46.494234 691879 kubeadm.go:309] Alternatively, if you are the root user, you can run:
I0617 11:36:46.494237 691879 kubeadm.go:309]
I0617 11:36:46.494292 691879 kubeadm.go:309] export KUBECONFIG=/etc/kubernetes/admin.conf
I0617 11:36:46.494296 691879 kubeadm.go:309]
I0617 11:36:46.494348 691879 kubeadm.go:309] You should now deploy a pod network to the cluster.
I0617 11:36:46.494420 691879 kubeadm.go:309] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0617 11:36:46.494485 691879 kubeadm.go:309] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0617 11:36:46.494490 691879 kubeadm.go:309]
I0617 11:36:46.494571 691879 kubeadm.go:309] You can now join any number of control-plane nodes by copying certificate authorities
I0617 11:36:46.494654 691879 kubeadm.go:309] and service account keys on each node and then running the following as root:
I0617 11:36:46.494668 691879 kubeadm.go:309]
I0617 11:36:46.494751 691879 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token yrsv16.mt35ve1y9ihhwy28 \
I0617 11:36:46.494857 691879 kubeadm.go:309] --discovery-token-ca-cert-hash sha256:dc4b907f606e2c80144c7b9bd3e930cd226e10982953b09171123a8759c70db4 \
I0617 11:36:46.494877 691879 kubeadm.go:309] --control-plane
I0617 11:36:46.494881 691879 kubeadm.go:309]
I0617 11:36:46.494963 691879 kubeadm.go:309] Then you can join any number of worker nodes by running the following on each as root:
I0617 11:36:46.494967 691879 kubeadm.go:309]
I0617 11:36:46.495047 691879 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token yrsv16.mt35ve1y9ihhwy28 \
I0617 11:36:46.495183 691879 kubeadm.go:309] --discovery-token-ca-cert-hash sha256:dc4b907f606e2c80144c7b9bd3e930cd226e10982953b09171123a8759c70db4
I0617 11:36:46.495192 691879 cni.go:84] Creating CNI manager for ""
I0617 11:36:46.495199 691879 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0617 11:36:46.497041 691879 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0617 11:36:46.498690 691879 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0617 11:36:46.502688 691879 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.30.1/kubectl ...
I0617 11:36:46.502709 691879 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
I0617 11:36:46.521138 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0617 11:36:46.797205 691879 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0617 11:36:46.797343 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:46.797425 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-134601 minikube.k8s.io/updated_at=2024_06_17T11_36_46_0700 minikube.k8s.io/version=v1.33.1 minikube.k8s.io/commit=e6835d8632d8e28da57a827eb12d7b852b17a9f6 minikube.k8s.io/name=addons-134601 minikube.k8s.io/primary=true
I0617 11:36:46.956611 691879 ops.go:34] apiserver oom_adj: -16
I0617 11:36:46.956719 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:47.457860 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:47.957299 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:48.457577 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:48.956777 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:49.457572 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:49.957357 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:50.456873 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:50.957105 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:51.456870 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:51.957288 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:52.457173 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:52.957583 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:53.457863 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:53.957478 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:54.457385 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:54.957589 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:55.457698 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:55.956846 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:56.457516 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:56.957081 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:57.457739 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:57.957106 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:58.457678 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:58.957125 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:59.457819 691879 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.30.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0617 11:36:59.560899 691879 kubeadm.go:1107] duration metric: took 12.763602634s to wait for elevateKubeSystemPrivileges
W0617 11:36:59.560931 691879 kubeadm.go:286] apiserver tunnel failed: apiserver port not set
I0617 11:36:59.560938 691879 kubeadm.go:393] duration metric: took 32.217500526s to StartCluster
I0617 11:36:59.560953 691879 settings.go:142] acquiring lock: {Name:mk2a85dcb9c00537cffe742aea475ca7d2cf09a4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:59.561058 691879 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19084-685849/kubeconfig
I0617 11:36:59.561443 691879 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19084-685849/kubeconfig: {Name:mk0f1db8295cd0d3b8a0428491dac563579b7b2b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0617 11:36:59.561625 691879 start.go:234] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.30.1 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0617 11:36:59.563506 691879 out.go:177] * Verifying Kubernetes components...
I0617 11:36:59.561758 691879 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0617 11:36:59.561940 691879 config.go:182] Loaded profile config "addons-134601": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.30.1
I0617 11:36:59.561950 691879 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0617 11:36:59.565153 691879 addons.go:69] Setting yakd=true in profile "addons-134601"
I0617 11:36:59.565181 691879 addons.go:234] Setting addon yakd=true in "addons-134601"
I0617 11:36:59.565215 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.565683 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.565772 691879 addons.go:69] Setting ingress-dns=true in profile "addons-134601"
I0617 11:36:59.565794 691879 addons.go:234] Setting addon ingress-dns=true in "addons-134601"
I0617 11:36:59.565820 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.566190 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.566631 691879 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0617 11:36:59.566786 691879 addons.go:69] Setting cloud-spanner=true in profile "addons-134601"
I0617 11:36:59.566808 691879 addons.go:234] Setting addon cloud-spanner=true in "addons-134601"
I0617 11:36:59.566828 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.567182 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.567800 691879 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-134601"
I0617 11:36:59.567846 691879 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-134601"
I0617 11:36:59.567876 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.568228 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.569687 691879 addons.go:69] Setting inspektor-gadget=true in profile "addons-134601"
I0617 11:36:59.569715 691879 addons.go:234] Setting addon inspektor-gadget=true in "addons-134601"
I0617 11:36:59.569739 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.570115 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.575800 691879 addons.go:69] Setting metrics-server=true in profile "addons-134601"
I0617 11:36:59.575845 691879 addons.go:234] Setting addon metrics-server=true in "addons-134601"
I0617 11:36:59.575889 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.576297 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.580052 691879 addons.go:69] Setting default-storageclass=true in profile "addons-134601"
I0617 11:36:59.603538 691879 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-134601"
I0617 11:36:59.603916 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.580212 691879 addons.go:69] Setting gcp-auth=true in profile "addons-134601"
I0617 11:36:59.606897 691879 mustload.go:65] Loading cluster: addons-134601
I0617 11:36:59.610539 691879 config.go:182] Loaded profile config "addons-134601": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.30.1
I0617 11:36:59.610946 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.580223 691879 addons.go:69] Setting ingress=true in profile "addons-134601"
I0617 11:36:59.626931 691879 addons.go:234] Setting addon ingress=true in "addons-134601"
I0617 11:36:59.627010 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.585684 691879 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-134601"
I0617 11:36:59.640766 691879 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-134601"
I0617 11:36:59.640835 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.641298 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.642802 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.585696 691879 addons.go:69] Setting registry=true in profile "addons-134601"
I0617 11:36:59.649768 691879 addons.go:234] Setting addon registry=true in "addons-134601"
I0617 11:36:59.649836 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.650365 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.585706 691879 addons.go:69] Setting storage-provisioner=true in profile "addons-134601"
I0617 11:36:59.671592 691879 addons.go:234] Setting addon storage-provisioner=true in "addons-134601"
I0617 11:36:59.671668 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.672150 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.585710 691879 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-134601"
I0617 11:36:59.585712 691879 addons.go:69] Setting volcano=true in profile "addons-134601"
I0617 11:36:59.585716 691879 addons.go:69] Setting volumesnapshots=true in profile "addons-134601"
I0617 11:36:59.687093 691879 addons.go:234] Setting addon volumesnapshots=true in "addons-134601"
I0617 11:36:59.687140 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.687734 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.729855 691879 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.4
I0617 11:36:59.734812 691879 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0617 11:36:59.734905 691879 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0617 11:36:59.735009 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.729505 691879 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
I0617 11:36:59.729535 691879 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-134601"
I0617 11:36:59.729563 691879 addons.go:234] Setting addon volcano=true in "addons-134601"
I0617 11:36:59.749469 691879 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.17
I0617 11:36:59.749898 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.749942 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.753802 691879 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.29.0
I0617 11:36:59.753809 691879 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0617 11:36:59.767647 691879 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0617 11:36:59.767712 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0617 11:36:59.767814 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.776177 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.755532 691879 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0617 11:36:59.787593 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0617 11:36:59.787682 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.806063 691879 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0617 11:36:59.806083 691879 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0617 11:36:59.806147 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.808058 691879 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0617 11:36:59.831979 691879 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0617 11:36:59.804669 691879 addons.go:234] Setting addon default-storageclass=true in "addons-134601"
I0617 11:36:59.804955 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.844682 691879 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.15.0
I0617 11:36:59.846826 691879 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0617 11:36:59.846861 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0617 11:36:59.846927 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.869099 691879 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0617 11:36:59.839681 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:36:59.839692 691879 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.1
I0617 11:36:59.884967 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:36:59.893177 691879 out.go:177] - Using image docker.io/registry:2.8.3
I0617 11:36:59.893186 691879 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0617 11:36:59.893191 691879 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0617 11:36:59.893194 691879 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0617 11:36:59.899806 691879 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0617 11:36:59.897891 691879 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0617 11:36:59.897902 691879 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.7.0
I0617 11:36:59.902511 691879 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0617 11:36:59.906099 691879 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0617 11:36:59.906108 691879 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0617 11:36:59.906257 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.910595 691879 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.10.1
I0617 11:36:59.913344 691879 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0617 11:36:59.910644 691879 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0617 11:36:59.910760 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0617 11:36:59.915284 691879 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0617 11:36:59.915293 691879 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.7.0
I0617 11:36:59.920068 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.925082 691879 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0617 11:36:59.925106 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0617 11:36:59.933103 691879 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.7.0
I0617 11:36:59.931805 691879 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0617 11:36:59.931815 691879 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0617 11:36:59.932417 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.980521 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
I0617 11:36:59.980673 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.990079 691879 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0617 11:36:59.990102 691879 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0617 11:36:59.990160 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:36:59.997119 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0617 11:36:59.997144 691879 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0617 11:36:59.997214 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:37:00.030374 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.031872 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.043864 691879 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0617 11:37:00.043936 691879 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0617 11:37:00.044034 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:37:00.058740 691879 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-134601"
I0617 11:37:00.058798 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:37:00.059266 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:37:00.062828 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.063868 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.086649 691879 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0617 11:37:00.086737 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (626760 bytes)
I0617 11:37:00.086859 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:37:00.107917 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.108339 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.144612 691879 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0617 11:37:00.145016 691879 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.30.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0617 11:37:00.173080 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.184186 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.185144 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.216925 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.242299 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
W0617 11:37:00.248898 691879 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0617 11:37:00.248953 691879 retry.go:31] will retry after 316.650336ms: ssh: handshake failed: EOF
I0617 11:37:00.262092 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.264912 691879 out.go:177] - Using image docker.io/busybox:stable
I0617 11:37:00.262757 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:00.269133 691879 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0617 11:37:00.271087 691879 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0617 11:37:00.271111 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0617 11:37:00.271194 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:37:00.309196 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
W0617 11:37:00.310391 691879 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0617 11:37:00.310434 691879 retry.go:31] will retry after 320.395408ms: ssh: handshake failed: EOF
I0617 11:37:00.415945 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0617 11:37:00.418442 691879 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0617 11:37:00.418463 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0617 11:37:00.561630 691879 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0617 11:37:00.561657 691879 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0617 11:37:00.582065 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0617 11:37:00.617776 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0617 11:37:00.624420 691879 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0617 11:37:00.624448 691879 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0617 11:37:00.671752 691879 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0617 11:37:00.671780 691879 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0617 11:37:00.683640 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0617 11:37:00.710134 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0617 11:37:00.732224 691879 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0617 11:37:00.732252 691879 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0617 11:37:00.775556 691879 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0617 11:37:00.775585 691879 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0617 11:37:00.794605 691879 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0617 11:37:00.794677 691879 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0617 11:37:00.806299 691879 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0617 11:37:00.806335 691879 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0617 11:37:00.810687 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0617 11:37:00.832329 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0617 11:37:00.918554 691879 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0617 11:37:00.918620 691879 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0617 11:37:00.934343 691879 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0617 11:37:00.934373 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0617 11:37:01.036415 691879 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0617 11:37:01.036445 691879 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0617 11:37:01.068180 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0617 11:37:01.089567 691879 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0617 11:37:01.089595 691879 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0617 11:37:01.154173 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0617 11:37:01.154202 691879 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0617 11:37:01.202729 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0617 11:37:01.232347 691879 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0617 11:37:01.232376 691879 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0617 11:37:01.292374 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0617 11:37:01.295512 691879 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0617 11:37:01.295539 691879 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0617 11:37:01.364207 691879 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0617 11:37:01.364238 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0617 11:37:01.512069 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0617 11:37:01.512100 691879 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0617 11:37:01.717874 691879 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0617 11:37:01.717898 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0617 11:37:01.757606 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0617 11:37:01.757646 691879 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0617 11:37:01.758431 691879 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0617 11:37:01.758477 691879 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0617 11:37:01.820572 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0617 11:37:02.055917 691879 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.30.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.30.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.910872599s)
I0617 11:37:02.056060 691879 start.go:946] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0617 11:37:02.055999 691879 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.91136117s)
I0617 11:37:02.057028 691879 node_ready.go:35] waiting up to 6m0s for node "addons-134601" to be "Ready" ...
I0617 11:37:02.060556 691879 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0617 11:37:02.060597 691879 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0617 11:37:02.068121 691879 node_ready.go:49] node "addons-134601" has status "Ready":"True"
I0617 11:37:02.068202 691879 node_ready.go:38] duration metric: took 11.143135ms for node "addons-134601" to be "Ready" ...
I0617 11:37:02.068228 691879 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0617 11:37:02.079039 691879 pod_ready.go:78] waiting up to 6m0s for pod "coredns-7db6d8ff4d-dhctc" in "kube-system" namespace to be "Ready" ...
I0617 11:37:02.099436 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0617 11:37:02.099502 691879 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0617 11:37:02.162683 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0617 11:37:02.391591 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0617 11:37:02.391656 691879 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0617 11:37:02.413079 691879 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0617 11:37:02.413142 691879 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0617 11:37:02.561276 691879 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-134601" context rescaled to 1 replicas
I0617 11:37:02.614155 691879 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0617 11:37:02.614229 691879 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0617 11:37:02.778712 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.362716108s)
I0617 11:37:02.795652 691879 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0617 11:37:02.795725 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0617 11:37:02.879482 691879 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0617 11:37:02.879552 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0617 11:37:03.082095 691879 pod_ready.go:97] error getting pod "coredns-7db6d8ff4d-dhctc" in "kube-system" namespace (skipping!): pods "coredns-7db6d8ff4d-dhctc" not found
I0617 11:37:03.082174 691879 pod_ready.go:81] duration metric: took 1.003055428s for pod "coredns-7db6d8ff4d-dhctc" in "kube-system" namespace to be "Ready" ...
E0617 11:37:03.082201 691879 pod_ready.go:66] WaitExtra: waitPodCondition: error getting pod "coredns-7db6d8ff4d-dhctc" in "kube-system" namespace (skipping!): pods "coredns-7db6d8ff4d-dhctc" not found
I0617 11:37:03.082222 691879 pod_ready.go:78] waiting up to 6m0s for pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace to be "Ready" ...
I0617 11:37:03.471591 691879 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0617 11:37:03.471653 691879 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0617 11:37:03.574776 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0617 11:37:04.027682 691879 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0617 11:37:04.027742 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0617 11:37:04.340618 691879 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0617 11:37:04.340692 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0617 11:37:04.362746 691879 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0617 11:37:04.362810 691879 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0617 11:37:04.383100 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0617 11:37:05.091243 691879 pod_ready.go:102] pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:07.045288 691879 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0617 11:37:07.045430 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:37:07.068132 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:07.626839 691879 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0617 11:37:07.670610 691879 pod_ready.go:102] pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:07.907031 691879 addons.go:234] Setting addon gcp-auth=true in "addons-134601"
I0617 11:37:07.907132 691879 host.go:66] Checking if "addons-134601" exists ...
I0617 11:37:07.907613 691879 cli_runner.go:164] Run: docker container inspect addons-134601 --format={{.State.Status}}
I0617 11:37:07.931530 691879 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0617 11:37:07.931585 691879 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-134601
I0617 11:37:07.954389 691879 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:33537 SSHKeyPath:/home/jenkins/minikube-integration/19084-685849/.minikube/machines/addons-134601/id_rsa Username:docker}
I0617 11:37:08.840204 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (8.258099102s)
I0617 11:37:08.840503 691879 addons.go:475] Verifying addon ingress=true in "addons-134601"
I0617 11:37:08.842515 691879 out.go:177] * Verifying ingress addon...
I0617 11:37:08.840626 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (8.130272848s)
I0617 11:37:08.840390 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (8.156728261s)
I0617 11:37:08.840455 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (8.029744752s)
I0617 11:37:08.840335 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (8.222528659s)
I0617 11:37:08.845374 691879 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0617 11:37:08.850915 691879 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0617 11:37:08.850979 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:09.356556 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:09.856044 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:10.105780 691879 pod_ready.go:102] pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:10.401027 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:10.587631 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (9.755264506s)
I0617 11:37:10.587780 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.51954976s)
I0617 11:37:10.587811 691879 addons.go:475] Verifying addon metrics-server=true in "addons-134601"
I0617 11:37:10.587872 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (9.385114246s)
I0617 11:37:10.587901 691879 addons.go:475] Verifying addon registry=true in "addons-134601"
I0617 11:37:10.591068 691879 out.go:177] * Verifying registry addon...
I0617 11:37:10.588115 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (9.295713917s)
I0617 11:37:10.588152 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (8.767551689s)
I0617 11:37:10.588304 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (8.425557087s)
I0617 11:37:10.588388 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (7.013544798s)
I0617 11:37:10.594413 691879 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0617 11:37:10.596559 691879 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-134601 service yakd-dashboard -n yakd-dashboard
W0617 11:37:10.591588 691879 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0617 11:37:10.598875 691879 retry.go:31] will retry after 213.957487ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0617 11:37:10.602511 691879 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0617 11:37:10.602580 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:10.813197 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0617 11:37:10.872074 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:11.079854 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (6.696651858s)
I0617 11:37:11.079946 691879 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-134601"
I0617 11:37:11.082677 691879 out.go:177] * Verifying csi-hostpath-driver addon...
I0617 11:37:11.080220 691879 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (3.148666504s)
I0617 11:37:11.088005 691879 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.1
I0617 11:37:11.086604 691879 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0617 11:37:11.092192 691879 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0617 11:37:11.093834 691879 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0617 11:37:11.093903 691879 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0617 11:37:11.115157 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:11.116918 691879 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0617 11:37:11.116986 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:11.150651 691879 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0617 11:37:11.150724 691879 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0617 11:37:11.205859 691879 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0617 11:37:11.205931 691879 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0617 11:37:11.291754 691879 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0617 11:37:11.350630 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:11.597317 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:11.600946 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:11.850087 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:12.096420 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:12.099555 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:12.358408 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:12.393501 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (1.580255805s)
I0617 11:37:12.393655 691879 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.30.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.101814947s)
I0617 11:37:12.396865 691879 addons.go:475] Verifying addon gcp-auth=true in "addons-134601"
I0617 11:37:12.401390 691879 out.go:177] * Verifying gcp-auth addon...
I0617 11:37:12.404267 691879 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0617 11:37:12.406828 691879 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0617 11:37:12.589492 691879 pod_ready.go:102] pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:12.596612 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:12.601184 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:12.849616 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:13.096455 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:13.101010 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:13.352614 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:13.595979 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:13.600229 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:13.850227 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:14.101629 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:14.102508 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:14.355740 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:14.595574 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:14.603756 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:14.850315 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:15.090571 691879 pod_ready.go:102] pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:15.100391 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:15.101967 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:15.352294 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:15.589278 691879 pod_ready.go:92] pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:15.589352 691879 pod_ready.go:81] duration metric: took 12.507099007s for pod "coredns-7db6d8ff4d-rcpbp" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.589378 691879 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.595878 691879 pod_ready.go:92] pod "etcd-addons-134601" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:15.595953 691879 pod_ready.go:81] duration metric: took 6.553654ms for pod "etcd-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.595983 691879 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.602375 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:15.603911 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:15.605192 691879 pod_ready.go:92] pod "kube-apiserver-addons-134601" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:15.605273 691879 pod_ready.go:81] duration metric: took 9.268668ms for pod "kube-apiserver-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.605302 691879 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.611115 691879 pod_ready.go:92] pod "kube-controller-manager-addons-134601" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:15.611191 691879 pod_ready.go:81] duration metric: took 5.865571ms for pod "kube-controller-manager-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.611218 691879 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-8dp6r" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.617355 691879 pod_ready.go:92] pod "kube-proxy-8dp6r" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:15.617385 691879 pod_ready.go:81] duration metric: took 6.144572ms for pod "kube-proxy-8dp6r" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.617396 691879 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.852321 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:15.988548 691879 pod_ready.go:92] pod "kube-scheduler-addons-134601" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:15.988574 691879 pod_ready.go:81] duration metric: took 371.170141ms for pod "kube-scheduler-addons-134601" in "kube-system" namespace to be "Ready" ...
I0617 11:37:15.988588 691879 pod_ready.go:78] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace to be "Ready" ...
I0617 11:37:16.095265 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:16.099645 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:16.353267 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:16.595046 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:16.599488 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:16.849739 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:17.095686 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:17.099932 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:17.350592 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:17.597165 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:17.600599 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:17.850651 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:17.997476 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:18.114546 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:18.121221 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:18.349946 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:18.595991 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:18.599742 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:18.850617 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:19.097375 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:19.101038 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:19.350240 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:19.605156 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:19.610537 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:19.850895 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:20.096975 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:20.103293 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0617 11:37:20.350363 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:20.494549 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:20.595590 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:20.599648 691879 kapi.go:107] duration metric: took 10.005230482s to wait for kubernetes.io/minikube-addons=registry ...
I0617 11:37:20.850631 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:21.096300 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:21.351515 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:21.604178 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:21.850474 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:22.097692 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:22.351576 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:22.496817 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:22.597579 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:22.852466 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:23.098206 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:23.355369 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:23.600275 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:23.851050 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:24.096697 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:24.350341 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:24.596098 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:24.849980 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:24.995027 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:25.096765 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:25.350177 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:25.595998 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:25.851778 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:26.097641 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:26.350119 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:26.597186 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:26.850765 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:26.996141 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:27.098383 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:27.350435 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:27.595954 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:27.850546 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:28.096368 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:28.351304 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:28.598680 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:28.850079 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:29.096378 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:29.349773 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:29.496265 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:29.610950 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:29.852177 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:30.110952 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:30.358777 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:30.595591 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:30.851897 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:31.096053 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:31.351461 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:31.596376 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:31.850629 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:31.996686 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:32.097642 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:32.350443 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:32.595728 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:32.850193 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:33.097045 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:33.364035 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:33.596202 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:33.852514 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:34.095883 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:34.349378 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:34.495067 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:34.595521 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:34.849941 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:35.096382 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:35.349556 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:35.596360 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:35.849695 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:36.096047 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:36.350503 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:36.503938 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:36.595820 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:36.856160 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:37.096036 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:37.350032 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:37.599515 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:37.850005 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:38.095262 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:38.350181 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:38.595601 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:38.850294 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:38.995784 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:39.096237 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:39.350168 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:39.595524 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:39.856494 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:40.096715 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:40.350015 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:40.596386 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:40.850502 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:41.098579 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:41.349862 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:41.521826 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:41.597073 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:41.851263 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:42.098520 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:42.354980 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:42.596491 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:42.850009 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:43.096885 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:43.350094 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:43.595810 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:43.850076 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:43.994552 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:44.095798 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:44.349846 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:44.595941 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:44.851506 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:45.099634 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:45.357710 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:45.597045 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:45.850885 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:45.995180 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:46.095912 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:46.355608 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:46.596489 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:46.850568 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:47.096143 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:47.350214 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:47.596028 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:47.852841 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:47.996810 691879 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"False"
I0617 11:37:48.096692 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:48.349624 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:48.600598 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:48.858396 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:49.010000 691879 pod_ready.go:92] pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace has status "Ready":"True"
I0617 11:37:49.010071 691879 pod_ready.go:81] duration metric: took 33.021448392s for pod "nvidia-device-plugin-daemonset-q5vq2" in "kube-system" namespace to be "Ready" ...
I0617 11:37:49.010095 691879 pod_ready.go:38] duration metric: took 46.94184029s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0617 11:37:49.010142 691879 api_server.go:52] waiting for apiserver process to appear ...
I0617 11:37:49.010240 691879 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0617 11:37:49.037085 691879 api_server.go:72] duration metric: took 49.475432504s to wait for apiserver process to appear ...
I0617 11:37:49.037112 691879 api_server.go:88] waiting for apiserver healthz status ...
I0617 11:37:49.037134 691879 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0617 11:37:49.045042 691879 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0617 11:37:49.046083 691879 api_server.go:141] control plane version: v1.30.1
I0617 11:37:49.046141 691879 api_server.go:131] duration metric: took 9.020845ms to wait for apiserver health ...
I0617 11:37:49.046164 691879 system_pods.go:43] waiting for kube-system pods to appear ...
I0617 11:37:49.058276 691879 system_pods.go:59] 18 kube-system pods found
I0617 11:37:49.058352 691879 system_pods.go:61] "coredns-7db6d8ff4d-rcpbp" [4d131475-22c0-4162-a13c-060d85713663] Running
I0617 11:37:49.058373 691879 system_pods.go:61] "csi-hostpath-attacher-0" [082f66f2-278d-4b0c-8fa5-071f7b8a7bd0] Running
I0617 11:37:49.058393 691879 system_pods.go:61] "csi-hostpath-resizer-0" [f97050b0-96f1-4ce8-b3d8-97229b4bf912] Running
I0617 11:37:49.058428 691879 system_pods.go:61] "csi-hostpathplugin-brjpj" [56edcaa4-1c5d-4fef-9182-fac36192a21f] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0617 11:37:49.058456 691879 system_pods.go:61] "etcd-addons-134601" [1facb1fa-1533-4e70-aa63-af88db297139] Running
I0617 11:37:49.058480 691879 system_pods.go:61] "kindnet-j89dc" [840873ce-0d21-4437-b6d1-179226e9f7da] Running
I0617 11:37:49.058517 691879 system_pods.go:61] "kube-apiserver-addons-134601" [a614f4ca-c148-42ed-a2f3-9358f116ec5a] Running
I0617 11:37:49.058536 691879 system_pods.go:61] "kube-controller-manager-addons-134601" [6678d4be-e2bb-41f7-a556-8c7a905e9d99] Running
I0617 11:37:49.058560 691879 system_pods.go:61] "kube-ingress-dns-minikube" [1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0617 11:37:49.058596 691879 system_pods.go:61] "kube-proxy-8dp6r" [e9a7b296-7d12-497c-b893-a917b754a866] Running
I0617 11:37:49.058623 691879 system_pods.go:61] "kube-scheduler-addons-134601" [334f79fc-b405-4a33-8a3a-516acd4ab87a] Running
I0617 11:37:49.058645 691879 system_pods.go:61] "metrics-server-c59844bb4-q8m7p" [ae6a5fc6-8011-4441-a047-37a05043782d] Running
I0617 11:37:49.058670 691879 system_pods.go:61] "nvidia-device-plugin-daemonset-q5vq2" [670a7d96-6767-4d7a-b66e-430d9fd9ea84] Running
I0617 11:37:49.058701 691879 system_pods.go:61] "registry-kb4t9" [7de14dcf-fed8-4a0e-80ba-1bb85acaa099] Running
I0617 11:37:49.058725 691879 system_pods.go:61] "registry-proxy-8q9kp" [2f6405e9-dc4d-4d13-8f69-a273afd74af7] Running
I0617 11:37:49.058747 691879 system_pods.go:61] "snapshot-controller-745499f584-hj4lb" [801c7238-6beb-4469-8aba-5360c367f482] Running
I0617 11:37:49.058772 691879 system_pods.go:61] "snapshot-controller-745499f584-qlkj6" [6dcf3350-4d26-4e01-aef9-5b641e7bd68a] Running
I0617 11:37:49.058806 691879 system_pods.go:61] "storage-provisioner" [b82115e2-7802-46c5-8b7c-b6b278fc6ce1] Running
I0617 11:37:49.058832 691879 system_pods.go:74] duration metric: took 12.648955ms to wait for pod list to return data ...
I0617 11:37:49.058855 691879 default_sa.go:34] waiting for default service account to be created ...
I0617 11:37:49.062087 691879 default_sa.go:45] found service account: "default"
I0617 11:37:49.062108 691879 default_sa.go:55] duration metric: took 3.230694ms for default service account to be created ...
I0617 11:37:49.062116 691879 system_pods.go:116] waiting for k8s-apps to be running ...
I0617 11:37:49.074064 691879 system_pods.go:86] 18 kube-system pods found
I0617 11:37:49.074136 691879 system_pods.go:89] "coredns-7db6d8ff4d-rcpbp" [4d131475-22c0-4162-a13c-060d85713663] Running
I0617 11:37:49.074160 691879 system_pods.go:89] "csi-hostpath-attacher-0" [082f66f2-278d-4b0c-8fa5-071f7b8a7bd0] Running
I0617 11:37:49.074183 691879 system_pods.go:89] "csi-hostpath-resizer-0" [f97050b0-96f1-4ce8-b3d8-97229b4bf912] Running
I0617 11:37:49.074231 691879 system_pods.go:89] "csi-hostpathplugin-brjpj" [56edcaa4-1c5d-4fef-9182-fac36192a21f] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0617 11:37:49.074254 691879 system_pods.go:89] "etcd-addons-134601" [1facb1fa-1533-4e70-aa63-af88db297139] Running
I0617 11:37:49.074279 691879 system_pods.go:89] "kindnet-j89dc" [840873ce-0d21-4437-b6d1-179226e9f7da] Running
I0617 11:37:49.074311 691879 system_pods.go:89] "kube-apiserver-addons-134601" [a614f4ca-c148-42ed-a2f3-9358f116ec5a] Running
I0617 11:37:49.074338 691879 system_pods.go:89] "kube-controller-manager-addons-134601" [6678d4be-e2bb-41f7-a556-8c7a905e9d99] Running
I0617 11:37:49.074365 691879 system_pods.go:89] "kube-ingress-dns-minikube" [1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0617 11:37:49.074390 691879 system_pods.go:89] "kube-proxy-8dp6r" [e9a7b296-7d12-497c-b893-a917b754a866] Running
I0617 11:37:49.074426 691879 system_pods.go:89] "kube-scheduler-addons-134601" [334f79fc-b405-4a33-8a3a-516acd4ab87a] Running
I0617 11:37:49.074455 691879 system_pods.go:89] "metrics-server-c59844bb4-q8m7p" [ae6a5fc6-8011-4441-a047-37a05043782d] Running
I0617 11:37:49.074480 691879 system_pods.go:89] "nvidia-device-plugin-daemonset-q5vq2" [670a7d96-6767-4d7a-b66e-430d9fd9ea84] Running
I0617 11:37:49.074505 691879 system_pods.go:89] "registry-kb4t9" [7de14dcf-fed8-4a0e-80ba-1bb85acaa099] Running
I0617 11:37:49.074538 691879 system_pods.go:89] "registry-proxy-8q9kp" [2f6405e9-dc4d-4d13-8f69-a273afd74af7] Running
I0617 11:37:49.074566 691879 system_pods.go:89] "snapshot-controller-745499f584-hj4lb" [801c7238-6beb-4469-8aba-5360c367f482] Running
I0617 11:37:49.074588 691879 system_pods.go:89] "snapshot-controller-745499f584-qlkj6" [6dcf3350-4d26-4e01-aef9-5b641e7bd68a] Running
I0617 11:37:49.074613 691879 system_pods.go:89] "storage-provisioner" [b82115e2-7802-46c5-8b7c-b6b278fc6ce1] Running
I0617 11:37:49.074650 691879 system_pods.go:126] duration metric: took 12.528686ms to wait for k8s-apps to be running ...
I0617 11:37:49.074678 691879 system_svc.go:44] waiting for kubelet service to be running ....
I0617 11:37:49.074760 691879 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0617 11:37:49.088724 691879 system_svc.go:56] duration metric: took 14.038141ms WaitForService to wait for kubelet
I0617 11:37:49.088800 691879 kubeadm.go:576] duration metric: took 49.527151931s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0617 11:37:49.088853 691879 node_conditions.go:102] verifying NodePressure condition ...
I0617 11:37:49.092613 691879 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0617 11:37:49.092688 691879 node_conditions.go:123] node cpu capacity is 2
I0617 11:37:49.092716 691879 node_conditions.go:105] duration metric: took 3.839542ms to run NodePressure ...
I0617 11:37:49.092743 691879 start.go:240] waiting for startup goroutines ...
I0617 11:37:49.099003 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:49.352117 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:49.602970 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:49.850768 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:50.120325 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:50.359741 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:50.599116 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:50.850444 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:51.098143 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:51.351224 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:51.596972 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:51.850710 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:52.095342 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:52.350501 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:52.595779 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:52.850239 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:53.096981 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:53.351088 691879 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0617 11:37:53.596584 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:53.857200 691879 kapi.go:107] duration metric: took 45.011822414s to wait for app.kubernetes.io/name=ingress-nginx ...
I0617 11:37:54.102819 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:54.430936 691879 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0617 11:37:54.430962 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:54.596371 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:54.908780 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:55.098483 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:55.408329 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:55.601235 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:55.907571 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:56.098372 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:56.408554 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:56.598272 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:56.908786 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:57.095631 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:57.408046 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:57.596305 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:57.907530 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:58.095647 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:58.407770 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:58.595392 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:58.908521 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:59.096130 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:59.408647 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:37:59.595226 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0617 11:37:59.908152 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:00.100640 691879 kapi.go:107] duration metric: took 49.014027342s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0617 11:38:00.408056 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:00.908172 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:01.407808 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:01.907871 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:02.408546 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:02.907612 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:03.408400 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:03.908102 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:04.407974 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:04.907536 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:05.407940 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:05.907685 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:06.407546 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:06.908545 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:07.407355 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:07.907831 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:08.408133 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:08.907753 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:09.408499 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:09.907659 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:10.408049 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:10.914541 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:11.408198 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:11.907396 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:12.408426 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:12.908247 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:13.407308 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:13.907590 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:14.408316 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:14.907830 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:15.415463 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:15.909014 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:16.408398 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:16.907719 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:17.408198 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:17.907643 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:18.408517 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:18.918073 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:19.407822 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:19.907678 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:20.407551 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:20.907993 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:21.408420 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:21.907717 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:22.408464 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:22.908507 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:23.407873 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:23.907573 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:24.409089 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:24.908857 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:25.408055 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:25.908471 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:26.408289 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:26.907974 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:27.409137 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:27.908072 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:28.407489 691879 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0617 11:38:28.908345 691879 kapi.go:107] duration metric: took 1m16.504073825s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0617 11:38:28.910378 691879 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-134601 cluster.
I0617 11:38:28.912021 691879 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0617 11:38:28.914098 691879 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0617 11:38:28.915806 691879 out.go:177] * Enabled addons: cloud-spanner, storage-provisioner, ingress-dns, nvidia-device-plugin, default-storageclass, volcano, metrics-server, inspektor-gadget, yakd, storage-provisioner-rancher, volumesnapshots, registry, ingress, csi-hostpath-driver, gcp-auth
I0617 11:38:28.917555 691879 addons.go:510] duration metric: took 1m29.355595505s for enable addons: enabled=[cloud-spanner storage-provisioner ingress-dns nvidia-device-plugin default-storageclass volcano metrics-server inspektor-gadget yakd storage-provisioner-rancher volumesnapshots registry ingress csi-hostpath-driver gcp-auth]
I0617 11:38:28.917613 691879 start.go:245] waiting for cluster config update ...
I0617 11:38:28.917639 691879 start.go:254] writing updated cluster config ...
I0617 11:38:28.917969 691879 ssh_runner.go:195] Run: rm -f paused
I0617 11:38:29.237022 691879 start.go:600] kubectl: 1.30.2, cluster: 1.30.1 (minor skew: 0)
I0617 11:38:29.239556 691879 out.go:177] * Done! kubectl is now configured to use "addons-134601" cluster and "default" namespace by default
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
f12f553082aaf dd1b12fcb6097 9 seconds ago Exited hello-world-app 2 5307790b52f19 hello-world-app-86c47465fc-ct8bk
b6a0f77c2e369 11ceee7cdc572 31 seconds ago Running nginx 0 4f2764544a018 test-job-nginx-0
80e60f5317480 4f49228258b64 31 seconds ago Running nginx 0 0742d29f0e2e8 nginx
aeb66cd2e7488 9e1a67634369d 3 minutes ago Running headlamp 0 1931325846d30 headlamp-7fc69f7444-7sq4b
dab9cffb11023 6ef582f3ec844 3 minutes ago Running gcp-auth 0 33823b7628785 gcp-auth-5db96cd9b4-4xrk9
61d5a9fceacac 296b5f799fcd8 4 minutes ago Exited patch 0 26a5d1f1df32d ingress-nginx-admission-patch-6ml2z
982af2171b80d 296b5f799fcd8 4 minutes ago Exited create 0 7e2988965a166 ingress-nginx-admission-create-tzjrd
2abc3a78739cc 20e3f2db01e81 4 minutes ago Running yakd 0 1775a443b3da0 yakd-dashboard-5ddbf7d777-9bcsv
c7f63f903b7da 2437cf7621777 4 minutes ago Running coredns 0 3d2af0a084bc4 coredns-7db6d8ff4d-rcpbp
8aa65f85a74a1 ba04bb24b9575 4 minutes ago Running storage-provisioner 0 b47ff04e3ef36 storage-provisioner
53f6456d3e890 89d73d416b992 4 minutes ago Running kindnet-cni 0 7a7f60c8de029 kindnet-j89dc
b94d683b305ae 05eccb821e159 4 minutes ago Running kube-proxy 0 533630cf2c077 kube-proxy-8dp6r
f94e85b7dc638 988b55d423baf 4 minutes ago Running kube-apiserver 0 3d65bed900158 kube-apiserver-addons-134601
4de1d887d8c35 234ac56e455be 4 minutes ago Running kube-controller-manager 0 c2d9d63c4b164 kube-controller-manager-addons-134601
ca86d5880b83c 163ff818d154d 4 minutes ago Running kube-scheduler 0 0a8999a3690ec kube-scheduler-addons-134601
2f8d6cf85e07f 014faa467e297 4 minutes ago Running etcd 0 ba93be9aa6201 etcd-addons-134601
==> containerd <==
Jun 17 11:41:28 addons-134601 containerd[767]: time="2024-06-17T11:41:28.139574663Z" level=info msg="TearDown network for sandbox \"1c0724a441e7d8ed77cefb5691a808628e99ff66164cb06e4993e65958643fdc\" successfully"
Jun 17 11:41:28 addons-134601 containerd[767]: time="2024-06-17T11:41:28.139604496Z" level=info msg="StopPodSandbox for \"1c0724a441e7d8ed77cefb5691a808628e99ff66164cb06e4993e65958643fdc\" returns successfully"
Jun 17 11:41:28 addons-134601 containerd[767]: time="2024-06-17T11:41:28.654150872Z" level=info msg="RemoveContainer for \"96569571b2ed206fc4321b3992f95415414a9d49633dae9ede9ecc0e9dc3da10\""
Jun 17 11:41:28 addons-134601 containerd[767]: time="2024-06-17T11:41:28.668531634Z" level=info msg="RemoveContainer for \"96569571b2ed206fc4321b3992f95415414a9d49633dae9ede9ecc0e9dc3da10\" returns successfully"
Jun 17 11:41:28 addons-134601 containerd[767]: time="2024-06-17T11:41:28.670689837Z" level=info msg="RemoveContainer for \"e4b1a6a232b7d969dd35d4d4131761d44506f75970875c416283bc84853841ca\""
Jun 17 11:41:28 addons-134601 containerd[767]: time="2024-06-17T11:41:28.685177608Z" level=info msg="RemoveContainer for \"e4b1a6a232b7d969dd35d4d4131761d44506f75970875c416283bc84853841ca\" returns successfully"
Jun 17 11:41:30 addons-134601 containerd[767]: time="2024-06-17T11:41:30.334850702Z" level=info msg="StopContainer for \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\" with timeout 2 (s)"
Jun 17 11:41:30 addons-134601 containerd[767]: time="2024-06-17T11:41:30.335618873Z" level=info msg="Stop container \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\" with signal terminated"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.342302800Z" level=info msg="Kill container \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\""
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.413890377Z" level=info msg="shim disconnected" id=a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.413959758Z" level=warning msg="cleaning up after shim disconnected" id=a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec namespace=k8s.io
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.413973042Z" level=info msg="cleaning up dead shim"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.423779527Z" level=warning msg="cleanup warnings time=\"2024-06-17T11:41:32Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=11806 runtime=io.containerd.runc.v2\n"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.426473536Z" level=info msg="StopContainer for \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\" returns successfully"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.427077387Z" level=info msg="StopPodSandbox for \"8632c33e0badb2cbf99dbb2d9f82eb85e79af4bc413aa0b5f9aeddb2a8703c2c\""
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.427132630Z" level=info msg="Container to stop \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.460197120Z" level=info msg="shim disconnected" id=8632c33e0badb2cbf99dbb2d9f82eb85e79af4bc413aa0b5f9aeddb2a8703c2c
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.460264597Z" level=warning msg="cleaning up after shim disconnected" id=8632c33e0badb2cbf99dbb2d9f82eb85e79af4bc413aa0b5f9aeddb2a8703c2c namespace=k8s.io
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.460278907Z" level=info msg="cleaning up dead shim"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.467978109Z" level=warning msg="cleanup warnings time=\"2024-06-17T11:41:32Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=11837 runtime=io.containerd.runc.v2\n"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.512842473Z" level=info msg="TearDown network for sandbox \"8632c33e0badb2cbf99dbb2d9f82eb85e79af4bc413aa0b5f9aeddb2a8703c2c\" successfully"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.512880962Z" level=info msg="StopPodSandbox for \"8632c33e0badb2cbf99dbb2d9f82eb85e79af4bc413aa0b5f9aeddb2a8703c2c\" returns successfully"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.679116743Z" level=info msg="RemoveContainer for \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\""
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.684950602Z" level=info msg="RemoveContainer for \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\" returns successfully"
Jun 17 11:41:32 addons-134601 containerd[767]: time="2024-06-17T11:41:32.685519507Z" level=error msg="ContainerStatus for \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\": not found"
==> coredns [c7f63f903b7dad5086bd3290c28145b8bf197be69aaf3235827d3e0a13cf4e01] <==
[INFO] 10.244.0.20:43141 - 1285 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000080088s
[INFO] 10.244.0.20:41503 - 45174 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.002784066s
[INFO] 10.244.0.20:43141 - 35660 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001408427s
[INFO] 10.244.0.20:43141 - 47004 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001520785s
[INFO] 10.244.0.20:41503 - 31218 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001894668s
[INFO] 10.244.0.20:43141 - 25727 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000148583s
[INFO] 10.244.0.20:41503 - 8729 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000286959s
[INFO] 10.244.0.20:47878 - 26467 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000153309s
[INFO] 10.244.0.20:56284 - 26367 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.00025496s
[INFO] 10.244.0.20:47878 - 39052 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000078833s
[INFO] 10.244.0.20:47878 - 26604 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000123804s
[INFO] 10.244.0.20:47878 - 61782 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000051289s
[INFO] 10.244.0.20:56284 - 22317 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000055473s
[INFO] 10.244.0.20:56284 - 40198 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000276916s
[INFO] 10.244.0.20:56284 - 2880 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000116174s
[INFO] 10.244.0.20:47878 - 26933 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000061438s
[INFO] 10.244.0.20:56284 - 65127 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000081311s
[INFO] 10.244.0.20:56284 - 4070 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000054653s
[INFO] 10.244.0.20:47878 - 260 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000081139s
[INFO] 10.244.0.20:56284 - 48748 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001417739s
[INFO] 10.244.0.20:47878 - 39010 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001031771s
[INFO] 10.244.0.20:56284 - 7221 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001234277s
[INFO] 10.244.0.20:47878 - 15508 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001800048s
[INFO] 10.244.0.20:56284 - 12853 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000067527s
[INFO] 10.244.0.20:47878 - 27430 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000107986s
==> describe nodes <==
Name: addons-134601
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-134601
kubernetes.io/os=linux
minikube.k8s.io/commit=e6835d8632d8e28da57a827eb12d7b852b17a9f6
minikube.k8s.io/name=addons-134601
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_06_17T11_36_46_0700
minikube.k8s.io/version=v1.33.1
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-134601
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 17 Jun 2024 11:36:43 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-134601
AcquireTime: <unset>
RenewTime: Mon, 17 Jun 2024 11:41:31 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 17 Jun 2024 11:41:21 +0000 Mon, 17 Jun 2024 11:36:40 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 17 Jun 2024 11:41:21 +0000 Mon, 17 Jun 2024 11:36:40 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 17 Jun 2024 11:41:21 +0000 Mon, 17 Jun 2024 11:36:40 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 17 Jun 2024 11:41:21 +0000 Mon, 17 Jun 2024 11:36:56 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-134601
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022356Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022356Ki
pods: 110
System Info:
Machine ID: 8af08a43d0fe435e8dc85025eae20e3f
System UUID: 5fb4b37d-72f4-429e-8408-6674f979d6cc
Boot ID: 10e5c427-da39-4514-92df-ee3f91ef093f
Kernel Version: 5.15.0-1063-aws
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://1.6.33
Kubelet Version: v1.30.1
Kube-Proxy Version: v1.30.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (14 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default hello-world-app-86c47465fc-ct8bk 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 25s
default nginx 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 34s
gcp-auth gcp-auth-5db96cd9b4-4xrk9 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 3m43s
headlamp headlamp-7fc69f7444-7sq4b 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 3m7s
kube-system coredns-7db6d8ff4d-rcpbp 100m (5%!)(MISSING) 0 (0%!)(MISSING) 70Mi (0%!)(MISSING) 170Mi (2%!)(MISSING) 4m38s
kube-system etcd-addons-134601 100m (5%!)(MISSING) 0 (0%!)(MISSING) 100Mi (1%!)(MISSING) 0 (0%!)(MISSING) 4m51s
kube-system kindnet-j89dc 100m (5%!)(MISSING) 100m (5%!)(MISSING) 50Mi (0%!)(MISSING) 50Mi (0%!)(MISSING) 4m38s
kube-system kube-apiserver-addons-134601 250m (12%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 4m52s
kube-system kube-controller-manager-addons-134601 200m (10%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 4m51s
kube-system kube-proxy-8dp6r 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 4m38s
kube-system kube-scheduler-addons-134601 100m (5%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 4m51s
kube-system storage-provisioner 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 4m33s
my-volcano test-job-nginx-0 1 (50%!)(MISSING) 1 (50%!)(MISSING) 0 (0%!)(MISSING) 0 (0%!)(MISSING) 2m40s
yakd-dashboard yakd-dashboard-5ddbf7d777-9bcsv 0 (0%!)(MISSING) 0 (0%!)(MISSING) 128Mi (1%!)(MISSING) 256Mi (3%!)(MISSING) 4m31s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 1850m (92%!)(MISSING) 1100m (55%!)(MISSING)
memory 348Mi (4%!)(MISSING) 476Mi (6%!)(MISSING)
ephemeral-storage 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-1Gi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-2Mi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-32Mi 0 (0%!)(MISSING) 0 (0%!)(MISSING)
hugepages-64Ki 0 (0%!)(MISSING) 0 (0%!)(MISSING)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 4m36s kube-proxy
Normal Starting 4m52s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 4m52s kubelet Node addons-134601 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 4m52s kubelet Node addons-134601 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 4m52s kubelet Node addons-134601 status is now: NodeHasSufficientPID
Normal NodeNotReady 4m52s kubelet Node addons-134601 status is now: NodeNotReady
Normal NodeAllocatableEnforced 4m52s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 4m41s kubelet Node addons-134601 status is now: NodeReady
Normal RegisteredNode 4m38s node-controller Node addons-134601 event: Registered Node addons-134601 in Controller
==> dmesg <==
[ +0.001027] FS-Cache: O-key=[8] '096fed0000000000'
[ +0.000700] FS-Cache: N-cookie c=0000001e [p=00000015 fl=2 nc=0 na=1]
[ +0.000954] FS-Cache: N-cookie d=00000000f5be1c10{9p.inode} n=00000000d77b19c4
[ +0.001040] FS-Cache: N-key=[8] '096fed0000000000'
[ +0.003119] FS-Cache: Duplicate cookie detected
[ +0.000692] FS-Cache: O-cookie c=00000018 [p=00000015 fl=226 nc=0 na=1]
[ +0.000977] FS-Cache: O-cookie d=00000000f5be1c10{9p.inode} n=00000000cac8d902
[ +0.001034] FS-Cache: O-key=[8] '096fed0000000000'
[ +0.000711] FS-Cache: N-cookie c=0000001f [p=00000015 fl=2 nc=0 na=1]
[ +0.000939] FS-Cache: N-cookie d=00000000f5be1c10{9p.inode} n=00000000e03cf592
[ +0.001051] FS-Cache: N-key=[8] '096fed0000000000'
[ +2.665516] FS-Cache: Duplicate cookie detected
[ +0.000684] FS-Cache: O-cookie c=00000016 [p=00000015 fl=226 nc=0 na=1]
[ +0.000934] FS-Cache: O-cookie d=00000000f5be1c10{9p.inode} n=0000000013106528
[ +0.001050] FS-Cache: O-key=[8] '086fed0000000000'
[ +0.000693] FS-Cache: N-cookie c=00000021 [p=00000015 fl=2 nc=0 na=1]
[ +0.000905] FS-Cache: N-cookie d=00000000f5be1c10{9p.inode} n=0000000079a885a5
[ +0.001017] FS-Cache: N-key=[8] '086fed0000000000'
[ +0.283712] FS-Cache: Duplicate cookie detected
[ +0.000687] FS-Cache: O-cookie c=0000001b [p=00000015 fl=226 nc=0 na=1]
[ +0.000989] FS-Cache: O-cookie d=00000000f5be1c10{9p.inode} n=00000000c764ff76
[ +0.001042] FS-Cache: O-key=[8] '0e6fed0000000000'
[ +0.000702] FS-Cache: N-cookie c=00000022 [p=00000015 fl=2 nc=0 na=1]
[ +0.000927] FS-Cache: N-cookie d=00000000f5be1c10{9p.inode} n=0000000066e1ff9a
[ +0.001048] FS-Cache: N-key=[8] '0e6fed0000000000'
==> etcd [2f8d6cf85e07f3e79e19823a63aa29a57dbf7f95d9973f7b470542bc7f65ef3c] <==
{"level":"info","ts":"2024-06-17T11:36:39.400096Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
{"level":"info","ts":"2024-06-17T11:36:39.400192Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2024-06-17T11:36:39.411145Z","caller":"embed/etcd.go:726","msg":"starting with client TLS","tls-info":"cert = /var/lib/minikube/certs/etcd/server.crt, key = /var/lib/minikube/certs/etcd/server.key, client-cert=, client-key=, trusted-ca = /var/lib/minikube/certs/etcd/ca.crt, client-cert-auth = true, crl-file = ","cipher-suites":[]}
{"level":"info","ts":"2024-06-17T11:36:39.411349Z","caller":"embed/etcd.go:277","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2024-06-17T11:36:39.411374Z","caller":"embed/etcd.go:857","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2024-06-17T11:36:39.411471Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-06-17T11:36:39.411487Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-06-17T11:36:39.787484Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2024-06-17T11:36:39.78759Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-06-17T11:36:39.787636Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-06-17T11:36:39.787685Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-06-17T11:36:39.78772Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-06-17T11:36:39.787777Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-06-17T11:36:39.787839Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-06-17T11:36:39.791669Z","caller":"etcdserver/server.go:2068","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-134601 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-06-17T11:36:39.791869Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-06-17T11:36:39.792213Z","caller":"etcdserver/server.go:2578","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-06-17T11:36:39.792358Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-06-17T11:36:39.792548Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-06-17T11:36:39.792593Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-06-17T11:36:39.794132Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-06-17T11:36:39.800917Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-06-17T11:36:39.800991Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-06-17T11:36:39.843756Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-06-17T11:36:39.843876Z","caller":"etcdserver/server.go:2602","msg":"cluster version is updated","cluster-version":"3.5"}
==> gcp-auth [dab9cffb11023ee47a311724e12224e0f1a8ccde8552c7cee1a1246472156360] <==
2024/06/17 11:38:30 Ready to write response ...
2024/06/17 11:38:30 Ready to marshal response ...
2024/06/17 11:38:30 Ready to write response ...
2024/06/17 11:38:30 Ready to marshal response ...
2024/06/17 11:38:30 Ready to write response ...
2024/06/17 11:38:40 Ready to marshal response ...
2024/06/17 11:38:40 Ready to write response ...
2024/06/17 11:38:56 Ready to marshal response ...
2024/06/17 11:38:56 Ready to write response ...
2024/06/17 11:38:57 Ready to marshal response ...
2024/06/17 11:38:57 Ready to write response ...
2024/06/17 11:38:57 Ready to marshal response ...
2024/06/17 11:38:57 Ready to write response ...
2024/06/17 11:38:57 Ready to marshal response ...
2024/06/17 11:38:57 Ready to write response ...
2024/06/17 11:39:04 Ready to marshal response ...
2024/06/17 11:39:04 Ready to write response ...
2024/06/17 11:40:15 Ready to marshal response ...
2024/06/17 11:40:15 Ready to write response ...
2024/06/17 11:40:30 Ready to marshal response ...
2024/06/17 11:40:30 Ready to write response ...
2024/06/17 11:41:03 Ready to marshal response ...
2024/06/17 11:41:03 Ready to write response ...
2024/06/17 11:41:12 Ready to marshal response ...
2024/06/17 11:41:12 Ready to write response ...
==> kernel <==
11:41:37 up 3:24, 0 users, load average: 1.06, 1.47, 1.93
Linux addons-134601 5.15.0-1063-aws #69~20.04.1-Ubuntu SMP Fri May 10 19:21:30 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kindnet [53f6456d3e890f3b76b502e2b8e96feb926160aca72356e9f5a62689ce60874d] <==
I0617 11:39:34.174363 1 main.go:227] handling current node
I0617 11:39:44.187265 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:39:44.187291 1 main.go:227] handling current node
I0617 11:39:54.204307 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:39:54.204337 1 main.go:227] handling current node
I0617 11:40:04.217892 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:40:04.217923 1 main.go:227] handling current node
I0617 11:40:14.236854 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:40:14.236889 1 main.go:227] handling current node
I0617 11:40:24.248005 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:40:24.248034 1 main.go:227] handling current node
I0617 11:40:34.252243 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:40:34.252271 1 main.go:227] handling current node
I0617 11:40:44.264295 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:40:44.264324 1 main.go:227] handling current node
I0617 11:40:54.271800 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:40:54.271827 1 main.go:227] handling current node
I0617 11:41:04.287203 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:41:04.287251 1 main.go:227] handling current node
I0617 11:41:14.311076 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:41:14.311111 1 main.go:227] handling current node
I0617 11:41:24.320800 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:41:24.320828 1 main.go:227] handling current node
I0617 11:41:34.337926 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0617 11:41:34.338030 1 main.go:227] handling current node
==> kube-apiserver [f94e85b7dc638e3aca11f5a296f3bcd7a10b4e8c0701da795ed293df7e3b26de] <==
I0617 11:40:46.203794 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0617 11:40:46.203936 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0617 11:40:46.327969 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0617 11:40:46.328005 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0617 11:40:47.130411 1 cacher.go:168] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0617 11:40:47.328081 1 cacher.go:168] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0617 11:40:47.337742 1 cacher.go:168] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0617 11:40:52.009769 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0617 11:40:53.045955 1 cacher.go:168] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0617 11:41:03.404130 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0617 11:41:03.656810 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.96.188.7"}
I0617 11:41:12.326154 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.99.245.45"}
I0617 11:41:14.239736 1 handler.go:286] Adding GroupVersion batch.volcano.sh v1alpha1 to ResourceManager
I0617 11:41:14.317436 1 handler.go:286] Adding GroupVersion bus.volcano.sh v1alpha1 to ResourceManager
I0617 11:41:14.786703 1 handler.go:286] Adding GroupVersion scheduling.volcano.sh v1beta1 to ResourceManager
I0617 11:41:14.815006 1 handler.go:286] Adding GroupVersion scheduling.volcano.sh v1beta1 to ResourceManager
I0617 11:41:14.866760 1 handler.go:286] Adding GroupVersion nodeinfo.volcano.sh v1alpha1 to ResourceManager
I0617 11:41:14.908085 1 handler.go:286] Adding GroupVersion scheduling.volcano.sh v1beta1 to ResourceManager
W0617 11:41:15.365744 1 cacher.go:168] Terminating all watchers from cacher commands.bus.volcano.sh
W0617 11:41:15.908690 1 cacher.go:168] Terminating all watchers from cacher podgroups.scheduling.volcano.sh
W0617 11:41:15.977500 1 cacher.go:168] Terminating all watchers from cacher queues.scheduling.volcano.sh
W0617 11:41:15.982823 1 cacher.go:168] Terminating all watchers from cacher numatopologies.nodeinfo.volcano.sh
W0617 11:41:16.165343 1 cacher.go:168] Terminating all watchers from cacher jobs.batch.volcano.sh
I0617 11:41:27.106840 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
E0617 11:41:29.403218 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"ingress-nginx\" not found]"
==> kube-controller-manager [4de1d887d8c35f62732d69772df12c19c5c5830780e3feaba11d461addf04d2f] <==
E0617 11:41:25.468511 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:26.846373 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:26.846557 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0617 11:41:28.683734 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-86c47465fc" duration="53.734µs"
I0617 11:41:29.312689 1 job_controller.go:566] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create"
I0617 11:41:29.315294 1 replica_set.go:676] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-768f948f8f" duration="4.997µs"
I0617 11:41:29.326046 1 job_controller.go:566] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch"
I0617 11:41:29.779924 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0617 11:41:29.779961 1 shared_informer.go:320] Caches are synced for resource quota
I0617 11:41:30.290425 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0617 11:41:30.290730 1 shared_informer.go:320] Caches are synced for garbage collector
W0617 11:41:30.639778 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:30.639820 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:31.950981 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:31.951020 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:32.053256 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:32.053358 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:34.355476 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:34.355738 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:34.743846 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:34.743885 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:36.645753 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:36.645806 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
W0617 11:41:36.781447 1 reflector.go:547] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0617 11:41:36.781483 1 reflector.go:150] k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
==> kube-proxy [b94d683b305ae640dc23f8bc01a8ad1602d1465279bd194f555c8b3fd7cd69bc] <==
I0617 11:37:01.590460 1 server_linux.go:69] "Using iptables proxy"
I0617 11:37:01.612624 1 server.go:1062] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
I0617 11:37:01.684334 1 server.go:659] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0617 11:37:01.684422 1 server_linux.go:165] "Using iptables Proxier"
I0617 11:37:01.688460 1 server_linux.go:511] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I0617 11:37:01.688495 1 server_linux.go:528] "Defaulting to no-op detect-local"
I0617 11:37:01.688571 1 proxier.go:243] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0617 11:37:01.688831 1 server.go:872] "Version info" version="v1.30.1"
I0617 11:37:01.688855 1 server.go:874] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0617 11:37:01.695641 1 config.go:319] "Starting node config controller"
I0617 11:37:01.695662 1 shared_informer.go:313] Waiting for caches to sync for node config
I0617 11:37:01.697774 1 config.go:192] "Starting service config controller"
I0617 11:37:01.697788 1 shared_informer.go:313] Waiting for caches to sync for service config
I0617 11:37:01.697809 1 config.go:101] "Starting endpoint slice config controller"
I0617 11:37:01.697814 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0617 11:37:01.797940 1 shared_informer.go:320] Caches are synced for node config
I0617 11:37:01.797996 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0617 11:37:01.798031 1 shared_informer.go:320] Caches are synced for service config
==> kube-scheduler [ca86d5880b83c7a0eb33cc70423e88eb21f8fa56032af302cd47d56c57ce1406] <==
W0617 11:36:43.741596 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0617 11:36:43.744488 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
W0617 11:36:43.744723 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0617 11:36:43.744862 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
W0617 11:36:43.744865 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0617 11:36:43.745041 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0617 11:36:43.745067 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0617 11:36:43.745046 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0617 11:36:43.744934 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0617 11:36:43.745109 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W0617 11:36:43.744980 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0617 11:36:43.745130 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W0617 11:36:43.744903 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0617 11:36:43.745153 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0617 11:36:43.745194 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0617 11:36:43.745210 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
W0617 11:36:43.745270 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0617 11:36:43.745286 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W0617 11:36:43.745335 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0617 11:36:43.745350 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W0617 11:36:43.744787 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0617 11:36:43.745365 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
W0617 11:36:43.745540 1 reflector.go:547] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0617 11:36:43.745616 1 reflector.go:150] k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
I0617 11:36:44.837852 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Jun 17 11:41:17 addons-134601 kubelet[1505]: E0617 11:41:17.619710 1505 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with CrashLoopBackOff: \"back-off 10s restarting failed container=hello-world-app pod=hello-world-app-86c47465fc-ct8bk_default(f6ccd338-2425-4df1-a9d4-bdb964684e30)\"" pod="default/hello-world-app-86c47465fc-ct8bk" podUID="f6ccd338-2425-4df1-a9d4-bdb964684e30"
Jun 17 11:41:21 addons-134601 kubelet[1505]: I0617 11:41:21.848222 1505 scope.go:117] "RemoveContainer" containerID="96569571b2ed206fc4321b3992f95415414a9d49633dae9ede9ecc0e9dc3da10"
Jun 17 11:41:21 addons-134601 kubelet[1505]: E0617 11:41:21.848975 1505 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"minikube-ingress-dns\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=minikube-ingress-dns pod=kube-ingress-dns-minikube_kube-system(1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9)\"" pod="kube-system/kube-ingress-dns-minikube" podUID="1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9"
Jun 17 11:41:27 addons-134601 kubelet[1505]: I0617 11:41:27.848127 1505 scope.go:117] "RemoveContainer" containerID="e4b1a6a232b7d969dd35d4d4131761d44506f75970875c416283bc84853841ca"
Jun 17 11:41:28 addons-134601 kubelet[1505]: I0617 11:41:28.233710 1505 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wd5zd\" (UniqueName: \"kubernetes.io/projected/1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9-kube-api-access-wd5zd\") pod \"1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9\" (UID: \"1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9\") "
Jun 17 11:41:28 addons-134601 kubelet[1505]: I0617 11:41:28.235811 1505 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9-kube-api-access-wd5zd" (OuterVolumeSpecName: "kube-api-access-wd5zd") pod "1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9" (UID: "1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9"). InnerVolumeSpecName "kube-api-access-wd5zd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jun 17 11:41:28 addons-134601 kubelet[1505]: I0617 11:41:28.334716 1505 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-wd5zd\" (UniqueName: \"kubernetes.io/projected/1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9-kube-api-access-wd5zd\") on node \"addons-134601\" DevicePath \"\""
Jun 17 11:41:28 addons-134601 kubelet[1505]: I0617 11:41:28.646814 1505 scope.go:117] "RemoveContainer" containerID="96569571b2ed206fc4321b3992f95415414a9d49633dae9ede9ecc0e9dc3da10"
Jun 17 11:41:28 addons-134601 kubelet[1505]: I0617 11:41:28.664930 1505 scope.go:117] "RemoveContainer" containerID="f12f553082aaf4600f346119af48bd5a435ede6a5665f5486e5b7358ab17e063"
Jun 17 11:41:28 addons-134601 kubelet[1505]: E0617 11:41:28.665326 1505 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"hello-world-app\" with CrashLoopBackOff: \"back-off 20s restarting failed container=hello-world-app pod=hello-world-app-86c47465fc-ct8bk_default(f6ccd338-2425-4df1-a9d4-bdb964684e30)\"" pod="default/hello-world-app-86c47465fc-ct8bk" podUID="f6ccd338-2425-4df1-a9d4-bdb964684e30"
Jun 17 11:41:28 addons-134601 kubelet[1505]: I0617 11:41:28.668881 1505 scope.go:117] "RemoveContainer" containerID="e4b1a6a232b7d969dd35d4d4131761d44506f75970875c416283bc84853841ca"
Jun 17 11:41:29 addons-134601 kubelet[1505]: I0617 11:41:29.850218 1505 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="115d4c2f-cce1-4da3-a7c9-b7b58698fbea" path="/var/lib/kubelet/pods/115d4c2f-cce1-4da3-a7c9-b7b58698fbea/volumes"
Jun 17 11:41:29 addons-134601 kubelet[1505]: I0617 11:41:29.850603 1505 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9" path="/var/lib/kubelet/pods/1a0d9cf5-35cb-4250-8fe7-9e6ff7aadad9/volumes"
Jun 17 11:41:29 addons-134601 kubelet[1505]: I0617 11:41:29.851608 1505 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24873dc8-4ebb-4e01-8878-a39035ba2418" path="/var/lib/kubelet/pods/24873dc8-4ebb-4e01-8878-a39035ba2418/volumes"
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.663035 1505 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/703378dc-b812-48fb-b127-fdb7adc9688c-webhook-cert\") pod \"703378dc-b812-48fb-b127-fdb7adc9688c\" (UID: \"703378dc-b812-48fb-b127-fdb7adc9688c\") "
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.663081 1505 reconciler_common.go:161] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k87g8\" (UniqueName: \"kubernetes.io/projected/703378dc-b812-48fb-b127-fdb7adc9688c-kube-api-access-k87g8\") pod \"703378dc-b812-48fb-b127-fdb7adc9688c\" (UID: \"703378dc-b812-48fb-b127-fdb7adc9688c\") "
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.666013 1505 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/703378dc-b812-48fb-b127-fdb7adc9688c-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "703378dc-b812-48fb-b127-fdb7adc9688c" (UID: "703378dc-b812-48fb-b127-fdb7adc9688c"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.666323 1505 operation_generator.go:887] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/703378dc-b812-48fb-b127-fdb7adc9688c-kube-api-access-k87g8" (OuterVolumeSpecName: "kube-api-access-k87g8") pod "703378dc-b812-48fb-b127-fdb7adc9688c" (UID: "703378dc-b812-48fb-b127-fdb7adc9688c"). InnerVolumeSpecName "kube-api-access-k87g8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.677281 1505 scope.go:117] "RemoveContainer" containerID="a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec"
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.685226 1505 scope.go:117] "RemoveContainer" containerID="a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec"
Jun 17 11:41:32 addons-134601 kubelet[1505]: E0617 11:41:32.685749 1505 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\": not found" containerID="a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec"
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.685856 1505 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec"} err="failed to get container status \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\": rpc error: code = NotFound desc = an error occurred when try to find container \"a1fdf7ba7ad638253ef6e86be02fddb519226cb287f0272460753f5c92269eec\": not found"
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.763579 1505 reconciler_common.go:289] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/703378dc-b812-48fb-b127-fdb7adc9688c-webhook-cert\") on node \"addons-134601\" DevicePath \"\""
Jun 17 11:41:32 addons-134601 kubelet[1505]: I0617 11:41:32.763623 1505 reconciler_common.go:289] "Volume detached for volume \"kube-api-access-k87g8\" (UniqueName: \"kubernetes.io/projected/703378dc-b812-48fb-b127-fdb7adc9688c-kube-api-access-k87g8\") on node \"addons-134601\" DevicePath \"\""
Jun 17 11:41:33 addons-134601 kubelet[1505]: I0617 11:41:33.849960 1505 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="703378dc-b812-48fb-b127-fdb7adc9688c" path="/var/lib/kubelet/pods/703378dc-b812-48fb-b127-fdb7adc9688c/volumes"
==> storage-provisioner [8aa65f85a74a1420b1c64e7c927f7f4e6d603c3032a29883f963d2df5938cf18] <==
I0617 11:37:05.910694 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0617 11:37:05.926252 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0617 11:37:05.926295 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0617 11:37:05.939906 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0617 11:37:05.941934 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"e1c8b45c-ce85-409e-a496-4c4ec1557e47", APIVersion:"v1", ResourceVersion:"538", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-134601_ae8194fa-0603-4320-bda2-7ae7a70aeb8e became leader
I0617 11:37:05.942252 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-134601_ae8194fa-0603-4320-bda2-7ae7a70aeb8e!
I0617 11:37:06.043648 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-134601_ae8194fa-0603-4320-bda2-7ae7a70aeb8e!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-134601 -n addons-134601
helpers_test.go:261: (dbg) Run: kubectl --context addons-134601 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (35.71s)