=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:328: registry stabilized in 1.950457ms
addons_test.go:330: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-cvtgf" [58047cca-a75a-41df-bc9f-a91dc5a547ca] Running
addons_test.go:330: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.002440512s
addons_test.go:333: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-f7ltj" [7dfd72e6-65ac-495b-9832-0e900d22d7e6] Running
addons_test.go:333: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.002630036s
addons_test.go:338: (dbg) Run: kubectl --context addons-315216 delete po -l run=registry-test --now
addons_test.go:343: (dbg) Run: kubectl --context addons-315216 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:343: (dbg) Non-zero exit: kubectl --context addons-315216 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.068810795s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:345: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-315216 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:349: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
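Both registry pods were reported Running above, so the 1m0s timeout suggests in-cluster DNS or Service reachability rather than the registry process itself. A sketch of manual follow-up checks against the same profile (not part of the captured log; the Service name "registry" is inferred from the URL being probed):
kubectl --context addons-315216 -n kube-system get svc registry
kubectl --context addons-315216 -n kube-system get endpoints registry
kubectl --context addons-315216 run registry-test --rm --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "nslookup registry.kube-system.svc.cluster.local; wget --spider -S http://registry.kube-system.svc.cluster.local"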
addons_test.go:357: (dbg) Run: out/minikube-linux-amd64 -p addons-315216 ip
2024/09/13 18:33:55 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:386: (dbg) Run: out/minikube-linux-amd64 -p addons-315216 addons disable registry --alsologtostderr -v=1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-315216
helpers_test.go:235: (dbg) docker inspect addons-315216:
-- stdout --
[
{
"Id": "597eb6e49b799c2c80ebeda6fe4177eac5914f7d15d7d4f6d592a9307b9dd73a",
"Created": "2024-09-13T18:21:00.949028173Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 12513,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-13T18:21:01.074591628Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:0dbfa4e10de72ad5b19052941be6a0a31f45a6011543305e0465758d912df3d4",
"ResolvConfPath": "/var/lib/docker/containers/597eb6e49b799c2c80ebeda6fe4177eac5914f7d15d7d4f6d592a9307b9dd73a/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/597eb6e49b799c2c80ebeda6fe4177eac5914f7d15d7d4f6d592a9307b9dd73a/hostname",
"HostsPath": "/var/lib/docker/containers/597eb6e49b799c2c80ebeda6fe4177eac5914f7d15d7d4f6d592a9307b9dd73a/hosts",
"LogPath": "/var/lib/docker/containers/597eb6e49b799c2c80ebeda6fe4177eac5914f7d15d7d4f6d592a9307b9dd73a/597eb6e49b799c2c80ebeda6fe4177eac5914f7d15d7d4f6d592a9307b9dd73a-json.log",
"Name": "/addons-315216",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"addons-315216:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-315216",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/cd747b1eb8e45dbe960648592581b099af7779cbf13094bf4b15e3727b2c3b1b-init/diff:/var/lib/docker/overlay2/e031d5ccc36ea399e2cfba0c5681544df856a4f055e288cbaea3e8f185f63675/diff",
"MergedDir": "/var/lib/docker/overlay2/cd747b1eb8e45dbe960648592581b099af7779cbf13094bf4b15e3727b2c3b1b/merged",
"UpperDir": "/var/lib/docker/overlay2/cd747b1eb8e45dbe960648592581b099af7779cbf13094bf4b15e3727b2c3b1b/diff",
"WorkDir": "/var/lib/docker/overlay2/cd747b1eb8e45dbe960648592581b099af7779cbf13094bf4b15e3727b2c3b1b/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "addons-315216",
"Source": "/var/lib/docker/volumes/addons-315216/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "addons-315216",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-315216",
"name.minikube.sigs.k8s.io": "addons-315216",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "9b76faff0cac9600db0f94d9c74357424a1defc891e3e0cf0df8516e8a4d997c",
"SandboxKey": "/var/run/docker/netns/9b76faff0cac",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-315216": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "7ae42d435adaf1ee1f67d007ea424a7745532b78bcb0da4cb69d5ac692087e5b",
"EndpointID": "c3b6f36f89bbdedea52a50d42fc7edd52ba979da79a15f30e0c6026678d21542",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-315216",
"597eb6e49b79"
]
}
}
}
}
]
-- /stdout --
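The full docker inspect dump above is what the post-mortem helper captures; when only the container state and the published registry port are of interest, a narrower query does the same job (a sketch run on the CI host, not part of the captured log):
docker inspect -f '{{.State.Status}}' addons-315216
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' addons-315216
docker port addons-315216 5000/tcp
The last command should report the 127.0.0.1:32770 binding listed under NetworkSettings.Ports above.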
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-315216 -n addons-315216
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-315216 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | --download-only -p | download-docker-782708 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | |
| | download-docker-782708 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p download-docker-782708 | download-docker-782708 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | 13 Sep 24 18:20 UTC |
| start | --download-only -p | binary-mirror-486689 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | |
| | binary-mirror-486689 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:41715 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-486689 | binary-mirror-486689 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | 13 Sep 24 18:20 UTC |
| addons | enable dashboard -p | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | |
| | addons-315216 | | | | | |
| addons | disable dashboard -p | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | |
| | addons-315216 | | | | | |
| start | -p addons-315216 --wait=true | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:20 UTC | 13 Sep 24 18:24 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:24 UTC | 13 Sep 24 18:24 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:32 UTC | 13 Sep 24 18:32 UTC |
| | -p addons-315216 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:32 UTC | 13 Sep 24 18:32 UTC |
| | addons-315216 | | | | | |
| addons | addons-315216 addons | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:32 UTC | 13 Sep 24 18:32 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:32 UTC | 13 Sep 24 18:33 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | -p addons-315216 | | | | | |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| ssh | addons-315216 ssh curl -s | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-315216 ip | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| ssh | addons-315216 ssh cat | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | /opt/local-path-provisioner/pvc-2743739b-b850-4f4a-8324-d1e883dc17e5_default_test-pvc/file1 | | | | | |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | addons-315216 | | | | | |
| addons | addons-315216 addons | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-315216 addons | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-315216 ip | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| addons | addons-315216 addons disable | addons-315216 | jenkins | v1.34.0 | 13 Sep 24 18:33 UTC | 13 Sep 24 18:33 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/13 18:20:37
Running on machine: ubuntu-20-agent-14
Binary: Built with gc go1.23.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0913 18:20:37.622079 11774 out.go:345] Setting OutFile to fd 1 ...
I0913 18:20:37.622164 11774 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0913 18:20:37.622171 11774 out.go:358] Setting ErrFile to fd 2...
I0913 18:20:37.622175 11774 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0913 18:20:37.622329 11774 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19636-3724/.minikube/bin
I0913 18:20:37.622838 11774 out.go:352] Setting JSON to false
I0913 18:20:37.623599 11774 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-14","uptime":189,"bootTime":1726251449,"procs":176,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1068-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0913 18:20:37.623685 11774 start.go:139] virtualization: kvm guest
I0913 18:20:37.625620 11774 out.go:177] * [addons-315216] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0913 18:20:37.626906 11774 notify.go:220] Checking for updates...
I0913 18:20:37.626912 11774 out.go:177] - MINIKUBE_LOCATION=19636
I0913 18:20:37.628079 11774 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0913 18:20:37.629591 11774 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19636-3724/kubeconfig
I0913 18:20:37.630753 11774 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19636-3724/.minikube
I0913 18:20:37.631775 11774 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0913 18:20:37.632836 11774 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0913 18:20:37.633886 11774 driver.go:394] Setting default libvirt URI to qemu:///system
I0913 18:20:37.653342 11774 docker.go:123] docker version: linux-27.2.1:Docker Engine - Community
I0913 18:20:37.653406 11774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0913 18:20:37.699455 11774 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-13 18:20:37.689365898 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1068-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647943680 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0913 18:20:37.699553 11774 docker.go:318] overlay module found
I0913 18:20:37.701342 11774 out.go:177] * Using the docker driver based on user configuration
I0913 18:20:37.702551 11774 start.go:297] selected driver: docker
I0913 18:20:37.702562 11774 start.go:901] validating driver "docker" against <nil>
I0913 18:20:37.702571 11774 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0913 18:20:37.703389 11774 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0913 18:20:37.746659 11774 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:26 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-13 18:20:37.738713127 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1068-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647943680 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-14 Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0913 18:20:37.746804 11774 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0913 18:20:37.747024 11774 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0913 18:20:37.748612 11774 out.go:177] * Using Docker driver with root privileges
I0913 18:20:37.749809 11774 cni.go:84] Creating CNI manager for ""
I0913 18:20:37.749874 11774 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0913 18:20:37.749885 11774 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0913 18:20:37.749950 11774 start.go:340] cluster config:
{Name:addons-315216 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-315216 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0913 18:20:37.751216 11774 out.go:177] * Starting "addons-315216" primary control-plane node in "addons-315216" cluster
I0913 18:20:37.752214 11774 cache.go:121] Beginning downloading kic base image for docker with docker
I0913 18:20:37.753232 11774 out.go:177] * Pulling base image v0.0.45-1726193793-19634 ...
I0913 18:20:37.754137 11774 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0913 18:20:37.754169 11774 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19636-3724/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0913 18:20:37.754181 11774 cache.go:56] Caching tarball of preloaded images
I0913 18:20:37.754219 11774 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e in local docker daemon
I0913 18:20:37.754271 11774 preload.go:172] Found /home/jenkins/minikube-integration/19636-3724/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0913 18:20:37.754284 11774 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0913 18:20:37.754634 11774 profile.go:143] Saving config to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/config.json ...
I0913 18:20:37.754654 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/config.json: {Name:mkac6589fc12791c34c9067d069a69a8559b9e44 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:20:37.768267 11774 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e to local cache
I0913 18:20:37.768342 11774 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e in local cache directory
I0913 18:20:37.768355 11774 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e in local cache directory, skipping pull
I0913 18:20:37.768359 11774 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e exists in cache, skipping pull
I0913 18:20:37.768368 11774 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e as a tarball
I0913 18:20:37.768375 11774 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e from local cache
I0913 18:20:49.395078 11774 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e from cached tarball
I0913 18:20:49.395109 11774 cache.go:194] Successfully downloaded all kic artifacts
I0913 18:20:49.395147 11774 start.go:360] acquireMachinesLock for addons-315216: {Name:mkd469c6369f105e9f5fe2a932062bc589aa74d2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0913 18:20:49.395237 11774 start.go:364] duration metric: took 68.605µs to acquireMachinesLock for "addons-315216"
I0913 18:20:49.395263 11774 start.go:93] Provisioning new machine with config: &{Name:addons-315216 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-315216 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0913 18:20:49.395350 11774 start.go:125] createHost starting for "" (driver="docker")
I0913 18:20:49.397777 11774 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0913 18:20:49.398017 11774 start.go:159] libmachine.API.Create for "addons-315216" (driver="docker")
I0913 18:20:49.398050 11774 client.go:168] LocalClient.Create starting
I0913 18:20:49.398152 11774 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca.pem
I0913 18:20:49.631678 11774 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/cert.pem
I0913 18:20:49.831979 11774 cli_runner.go:164] Run: docker network inspect addons-315216 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0913 18:20:49.846976 11774 cli_runner.go:211] docker network inspect addons-315216 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0913 18:20:49.847056 11774 network_create.go:284] running [docker network inspect addons-315216] to gather additional debugging logs...
I0913 18:20:49.847073 11774 cli_runner.go:164] Run: docker network inspect addons-315216
W0913 18:20:49.862020 11774 cli_runner.go:211] docker network inspect addons-315216 returned with exit code 1
I0913 18:20:49.862050 11774 network_create.go:287] error running [docker network inspect addons-315216]: docker network inspect addons-315216: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-315216 not found
I0913 18:20:49.862062 11774 network_create.go:289] output of [docker network inspect addons-315216]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-315216 not found
** /stderr **
I0913 18:20:49.862133 11774 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0913 18:20:49.876956 11774 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00191a910}
I0913 18:20:49.876999 11774 network_create.go:124] attempt to create docker network addons-315216 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0913 18:20:49.877037 11774 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-315216 addons-315216
I0913 18:20:49.931960 11774 network_create.go:108] docker network addons-315216 192.168.49.0/24 created
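The dedicated bridge network for the profile now exists; its subnet and gateway can be confirmed with a single format query (a sketch, not part of the captured log), which should echo the 192.168.49.0/24 and 192.168.49.1 values chosen above:
docker network inspect addons-315216 -f '{{range .IPAM.Config}}{{.Subnet}} {{.Gateway}}{{end}}'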
I0913 18:20:49.931990 11774 kic.go:121] calculated static IP "192.168.49.2" for the "addons-315216" container
I0913 18:20:49.932042 11774 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0913 18:20:49.946536 11774 cli_runner.go:164] Run: docker volume create addons-315216 --label name.minikube.sigs.k8s.io=addons-315216 --label created_by.minikube.sigs.k8s.io=true
I0913 18:20:49.962350 11774 oci.go:103] Successfully created a docker volume addons-315216
I0913 18:20:49.962437 11774 cli_runner.go:164] Run: docker run --rm --name addons-315216-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-315216 --entrypoint /usr/bin/test -v addons-315216:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e -d /var/lib
I0913 18:20:57.101515 11774 cli_runner.go:217] Completed: docker run --rm --name addons-315216-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-315216 --entrypoint /usr/bin/test -v addons-315216:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e -d /var/lib: (7.139033373s)
I0913 18:20:57.101549 11774 oci.go:107] Successfully prepared a docker volume addons-315216
I0913 18:20:57.101565 11774 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0913 18:20:57.101584 11774 kic.go:194] Starting extracting preloaded images to volume ...
I0913 18:20:57.101636 11774 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19636-3724/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-315216:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e -I lz4 -xf /preloaded.tar -C /extractDir
I0913 18:21:00.887434 11774 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19636-3724/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-315216:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e -I lz4 -xf /preloaded.tar -C /extractDir: (3.785767467s)
I0913 18:21:00.887459 11774 kic.go:203] duration metric: took 3.78587336s to extract preloaded images to volume ...
W0913 18:21:00.887610 11774 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0913 18:21:00.887733 11774 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0913 18:21:00.934295 11774 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-315216 --name addons-315216 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-315216 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-315216 --network addons-315216 --ip 192.168.49.2 --volume addons-315216:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e
I0913 18:21:01.251847 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Running}}
I0913 18:21:01.269216 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:01.287109 11774 cli_runner.go:164] Run: docker exec addons-315216 stat /var/lib/dpkg/alternatives/iptables
I0913 18:21:01.328940 11774 oci.go:144] the created container "addons-315216" has a running status.
I0913 18:21:01.328975 11774 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa...
I0913 18:21:01.503353 11774 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0913 18:21:01.526219 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:01.543637 11774 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0913 18:21:01.543662 11774 kic_runner.go:114] Args: [docker exec --privileged addons-315216 chown docker:docker /home/docker/.ssh/authorized_keys]
I0913 18:21:01.608418 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:01.628565 11774 machine.go:93] provisionDockerMachine start ...
I0913 18:21:01.628640 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:01.647681 11774 main.go:141] libmachine: Using SSH client type: native
I0913 18:21:01.647862 11774 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0913 18:21:01.647875 11774 main.go:141] libmachine: About to run SSH command:
hostname
I0913 18:21:01.840545 11774 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-315216
I0913 18:21:01.840583 11774 ubuntu.go:169] provisioning hostname "addons-315216"
I0913 18:21:01.840645 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:01.857070 11774 main.go:141] libmachine: Using SSH client type: native
I0913 18:21:01.857244 11774 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0913 18:21:01.857269 11774 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-315216 && echo "addons-315216" | sudo tee /etc/hostname
I0913 18:21:01.990975 11774 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-315216
I0913 18:21:01.991061 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:02.006511 11774 main.go:141] libmachine: Using SSH client type: native
I0913 18:21:02.006691 11774 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0913 18:21:02.006707 11774 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-315216' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-315216/g' /etc/hosts;
else
echo '127.0.1.1 addons-315216' | sudo tee -a /etc/hosts;
fi
fi
I0913 18:21:02.125286 11774 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0913 18:21:02.125312 11774 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19636-3724/.minikube CaCertPath:/home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19636-3724/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19636-3724/.minikube}
I0913 18:21:02.125353 11774 ubuntu.go:177] setting up certificates
I0913 18:21:02.125365 11774 provision.go:84] configureAuth start
I0913 18:21:02.125416 11774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-315216
I0913 18:21:02.140622 11774 provision.go:143] copyHostCerts
I0913 18:21:02.140687 11774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19636-3724/.minikube/ca.pem (1078 bytes)
I0913 18:21:02.140811 11774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19636-3724/.minikube/cert.pem (1123 bytes)
I0913 18:21:02.140888 11774 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19636-3724/.minikube/key.pem (1679 bytes)
I0913 18:21:02.140948 11774 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19636-3724/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca-key.pem org=jenkins.addons-315216 san=[127.0.0.1 192.168.49.2 addons-315216 localhost minikube]
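This server certificate is what dockerd inside the node later presents on port 2376, so if TLS errors show up the SANs can be inspected directly (a sketch using the path from the log above, not part of the captured log):
openssl x509 -in /home/jenkins/minikube-integration/19636-3724/.minikube/machines/server.pem -noout -text | grep -A1 'Subject Alternative Name'
The output should match the san=[127.0.0.1 192.168.49.2 addons-315216 localhost minikube] list above.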
I0913 18:21:02.426237 11774 provision.go:177] copyRemoteCerts
I0913 18:21:02.426293 11774 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0913 18:21:02.426326 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:02.441726 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:02.529150 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0913 18:21:02.549166 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0913 18:21:02.568923 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0913 18:21:02.588387 11774 provision.go:87] duration metric: took 463.009888ms to configureAuth
I0913 18:21:02.588412 11774 ubuntu.go:193] setting minikube options for container-runtime
I0913 18:21:02.588567 11774 config.go:182] Loaded profile config "addons-315216": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0913 18:21:02.588619 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:02.603648 11774 main.go:141] libmachine: Using SSH client type: native
I0913 18:21:02.603815 11774 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0913 18:21:02.603827 11774 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0913 18:21:02.721394 11774 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0913 18:21:02.721420 11774 ubuntu.go:71] root file system type: overlay
I0913 18:21:02.721534 11774 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0913 18:21:02.721596 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:02.736743 11774 main.go:141] libmachine: Using SSH client type: native
I0913 18:21:02.736905 11774 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0913 18:21:02.736961 11774 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0913 18:21:02.866701 11774 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0913 18:21:02.866764 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:02.882223 11774 main.go:141] libmachine: Using SSH client type: native
I0913 18:21:02.882432 11774 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0913 18:21:02.882458 11774 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0913 18:21:03.551281 11774 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-06 12:06:41.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-13 18:21:02.859175682 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
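The diff above shows minikube replacing the stock docker.service with its own unit (TLS on 2376, --insecure-registry for the service CIDR). Whether systemd actually loaded the rewritten unit can be checked from inside the node (a sketch, not part of the captured log):
out/minikube-linux-amd64 -p addons-315216 ssh "systemctl cat docker"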
I0913 18:21:03.551322 11774 machine.go:96] duration metric: took 1.922738555s to provisionDockerMachine
I0913 18:21:03.551336 11774 client.go:171] duration metric: took 14.153274624s to LocalClient.Create
I0913 18:21:03.551354 11774 start.go:167] duration metric: took 14.153337196s to libmachine.API.Create "addons-315216"
I0913 18:21:03.551365 11774 start.go:293] postStartSetup for "addons-315216" (driver="docker")
I0913 18:21:03.551375 11774 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0913 18:21:03.551429 11774 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0913 18:21:03.551470 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:03.567684 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:03.653633 11774 ssh_runner.go:195] Run: cat /etc/os-release
I0913 18:21:03.656359 11774 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0913 18:21:03.656388 11774 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0913 18:21:03.656395 11774 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0913 18:21:03.656401 11774 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0913 18:21:03.656409 11774 filesync.go:126] Scanning /home/jenkins/minikube-integration/19636-3724/.minikube/addons for local assets ...
I0913 18:21:03.656465 11774 filesync.go:126] Scanning /home/jenkins/minikube-integration/19636-3724/.minikube/files for local assets ...
I0913 18:21:03.656496 11774 start.go:296] duration metric: took 105.124671ms for postStartSetup
I0913 18:21:03.656803 11774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-315216
I0913 18:21:03.672056 11774 profile.go:143] Saving config to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/config.json ...
I0913 18:21:03.672302 11774 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0913 18:21:03.672349 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:03.688467 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:03.773937 11774 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0913 18:21:03.777821 11774 start.go:128] duration metric: took 14.38245778s to createHost
I0913 18:21:03.777850 11774 start.go:83] releasing machines lock for "addons-315216", held for 14.382600069s
I0913 18:21:03.777902 11774 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-315216
I0913 18:21:03.793501 11774 ssh_runner.go:195] Run: cat /version.json
I0913 18:21:03.793550 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:03.793558 11774 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0913 18:21:03.793605 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:03.811796 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:03.812062 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:03.962985 11774 ssh_runner.go:195] Run: systemctl --version
I0913 18:21:03.966714 11774 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0913 18:21:03.970270 11774 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0913 18:21:03.990577 11774 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0913 18:21:03.990634 11774 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0913 18:21:04.013991 11774 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0913 18:21:04.014015 11774 start.go:495] detecting cgroup driver to use...
I0913 18:21:04.014042 11774 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0913 18:21:04.014129 11774 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0913 18:21:04.027539 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0913 18:21:04.035545 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0913 18:21:04.043618 11774 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0913 18:21:04.043672 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0913 18:21:04.051741 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0913 18:21:04.059445 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0913 18:21:04.067342 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0913 18:21:04.075352 11774 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0913 18:21:04.082813 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0913 18:21:04.090742 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0913 18:21:04.098665 11774 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0913 18:21:04.106587 11774 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0913 18:21:04.113303 11774 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0913 18:21:04.120092 11774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0913 18:21:04.196082 11774 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0913 18:21:04.280699 11774 start.go:495] detecting cgroup driver to use...
I0913 18:21:04.280747 11774 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0913 18:21:04.280786 11774 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0913 18:21:04.290789 11774 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0913 18:21:04.290836 11774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0913 18:21:04.301202 11774 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0913 18:21:04.314644 11774 ssh_runner.go:195] Run: which cri-dockerd
I0913 18:21:04.317659 11774 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0913 18:21:04.326208 11774 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0913 18:21:04.341135 11774 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0913 18:21:04.432349 11774 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0913 18:21:04.526370 11774 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0913 18:21:04.526544 11774 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0913 18:21:04.541742 11774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0913 18:21:04.613180 11774 ssh_runner.go:195] Run: sudo systemctl restart docker
I0913 18:21:04.848601 11774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0913 18:21:04.858900 11774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0913 18:21:04.868919 11774 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0913 18:21:04.949098 11774 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0913 18:21:05.020308 11774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0913 18:21:05.097993 11774 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0913 18:21:05.109101 11774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0913 18:21:05.118172 11774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0913 18:21:05.189195 11774 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0913 18:21:05.242648 11774 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0913 18:21:05.242734 11774 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0913 18:21:05.246025 11774 start.go:563] Will wait 60s for crictl version
I0913 18:21:05.246072 11774 ssh_runner.go:195] Run: which crictl
I0913 18:21:05.248863 11774 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0913 18:21:05.277695 11774 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.2.1
RuntimeApiVersion: v1
I0913 18:21:05.277757 11774 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0913 18:21:05.298177 11774 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0913 18:21:05.320914 11774 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.2.1 ...
I0913 18:21:05.320991 11774 cli_runner.go:164] Run: docker network inspect addons-315216 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0913 18:21:05.335847 11774 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0913 18:21:05.338863 11774 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0913 18:21:05.348005 11774 kubeadm.go:883] updating cluster {Name:addons-315216 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-315216 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0913 18:21:05.348133 11774 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0913 18:21:05.348190 11774 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0913 18:21:05.364878 11774 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0913 18:21:05.364895 11774 docker.go:615] Images already preloaded, skipping extraction
I0913 18:21:05.364945 11774 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0913 18:21:05.380835 11774 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0913 18:21:05.380860 11774 cache_images.go:84] Images are preloaded, skipping loading
I0913 18:21:05.380871 11774 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0913 18:21:05.380961 11774 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-315216 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-315216 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0913 18:21:05.381004 11774 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0913 18:21:05.421844 11774 cni.go:84] Creating CNI manager for ""
I0913 18:21:05.421870 11774 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0913 18:21:05.421879 11774 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0913 18:21:05.421896 11774 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-315216 NodeName:addons-315216 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0913 18:21:05.422026 11774 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "addons-315216"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
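Editor's note: the generated kubeadm config above is written to /var/tmp/minikube/kubeadm.yaml.new before init. A minimal sketch (not how minikube itself validates the file) of sanity-checking such a config with kubeadm's --dry-run mode; the binary and config paths are taken from this log and are assumptions here:

// dryrun_kubeadm.go: a minimal sketch that runs kubeadm in --dry-run mode
// against the generated config above, without touching the live cluster state.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command(
		"/var/lib/minikube/binaries/v1.31.1/kubeadm",
		"init",
		"--config", "/var/tmp/minikube/kubeadm.yaml.new",
		"--dry-run",
	)
	out, err := cmd.CombinedOutput()
	fmt.Printf("%s\n", out)
	if err != nil {
		fmt.Println("dry run failed:", err)
	}
}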
I0913 18:21:05.422075 11774 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0913 18:21:05.430258 11774 binaries.go:44] Found k8s binaries, skipping transfer
I0913 18:21:05.430332 11774 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0913 18:21:05.437511 11774 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0913 18:21:05.452089 11774 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0913 18:21:05.466469 11774 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0913 18:21:05.480733 11774 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0913 18:21:05.483446 11774 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0913 18:21:05.492057 11774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0913 18:21:05.562559 11774 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0913 18:21:05.574309 11774 certs.go:68] Setting up /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216 for IP: 192.168.49.2
I0913 18:21:05.574329 11774 certs.go:194] generating shared ca certs ...
I0913 18:21:05.574355 11774 certs.go:226] acquiring lock for ca certs: {Name:mk30906b62223b99152c4d67bec8517fa308d1fb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:05.574458 11774 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19636-3724/.minikube/ca.key
I0913 18:21:05.794726 11774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19636-3724/.minikube/ca.crt ...
I0913 18:21:05.794750 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/ca.crt: {Name:mkc80074970e1242bfacc3c9f057072b02a9533e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:05.794899 11774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19636-3724/.minikube/ca.key ...
I0913 18:21:05.794908 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/ca.key: {Name:mk66ef0bf208c9f3e45eefb23304d19ebf6e1743 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:05.794975 11774 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.key
I0913 18:21:05.899577 11774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.crt ...
I0913 18:21:05.899601 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.crt: {Name:mk3de351ff27390f75702fb762ff7275b36b8083 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:05.899732 11774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.key ...
I0913 18:21:05.899741 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.key: {Name:mk6287a32ce037416b89e1fb9c3406f9f9919e9d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:05.899801 11774 certs.go:256] generating profile certs ...
I0913 18:21:05.899852 11774 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/client.key
I0913 18:21:05.899871 11774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/client.crt with IP's: []
I0913 18:21:06.054981 11774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/client.crt ...
I0913 18:21:06.055009 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/client.crt: {Name:mk8ffe00c34c62d38ca12589d6401134b22742c9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:06.055150 11774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/client.key ...
I0913 18:21:06.055156 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/client.key: {Name:mk3c6cc57dd1367570cd38344b98be400024fc30 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:06.055221 11774 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.key.1603746e
I0913 18:21:06.055239 11774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.crt.1603746e with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0913 18:21:06.335406 11774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.crt.1603746e ...
I0913 18:21:06.335429 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.crt.1603746e: {Name:mke8879123003ad5180e44d22dc9118aef6500a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:06.335561 11774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.key.1603746e ...
I0913 18:21:06.335572 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.key.1603746e: {Name:mk95c7d82a90ec8eda1ddb0bb2d7ad29e87a894c Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:06.335635 11774 certs.go:381] copying /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.crt.1603746e -> /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.crt
I0913 18:21:06.335701 11774 certs.go:385] copying /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.key.1603746e -> /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.key
I0913 18:21:06.335746 11774 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.key
I0913 18:21:06.335762 11774 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.crt with IP's: []
I0913 18:21:06.487619 11774 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.crt ...
I0913 18:21:06.487643 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.crt: {Name:mkb891460f76649e120fcc26379551a0a1e2bb9f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:06.487770 11774 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.key ...
I0913 18:21:06.487780 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.key: {Name:mked965dcfee3f6c105d8ad01fca0550fd7bc13f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:06.487925 11774 certs.go:484] found cert: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca-key.pem (1675 bytes)
I0913 18:21:06.487955 11774 certs.go:484] found cert: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/ca.pem (1078 bytes)
I0913 18:21:06.487974 11774 certs.go:484] found cert: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/cert.pem (1123 bytes)
I0913 18:21:06.487998 11774 certs.go:484] found cert: /home/jenkins/minikube-integration/19636-3724/.minikube/certs/key.pem (1679 bytes)
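Editor's note: the certs.go steps above generate the shared minikubeCA and proxyClientCA pairs from scratch on first start. A compressed sketch of what self-signed CA generation looks like with Go's standard library; this is not minikube's actual crypto.go, and the file names and 2048-bit key size are assumptions for illustration:

// gen_ca.go: generate a self-signed CA certificate and key, roughly the shape
// of the "generating ... ca cert" steps in the log above.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
	}
	// Self-signed: the template acts as both subject and issuer.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	_ = os.WriteFile("ca.crt", certPEM, 0644)
	_ = os.WriteFile("ca.key", keyPEM, 0600)
}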
I0913 18:21:06.488507 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0913 18:21:06.509188 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0913 18:21:06.528472 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0913 18:21:06.548151 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0913 18:21:06.568082 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0913 18:21:06.587847 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0913 18:21:06.607148 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0913 18:21:06.626503 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/profiles/addons-315216/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0913 18:21:06.646216 11774 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19636-3724/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0913 18:21:06.665743 11774 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0913 18:21:06.680252 11774 ssh_runner.go:195] Run: openssl version
I0913 18:21:06.684764 11774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0913 18:21:06.692329 11774 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0913 18:21:06.695101 11774 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 13 18:21 /usr/share/ca-certificates/minikubeCA.pem
I0913 18:21:06.695136 11774 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0913 18:21:06.700774 11774 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
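Editor's note: the /etc/ssl/certs/b5213941.0 symlink name above is the OpenSSL subject hash of the CA certificate, which is why the `openssl x509 -hash` command is run first. A minimal sketch (not minikube's certs.go) of deriving that link name:

// ca_hash_link.go: compute the OpenSSL subject hash for the minikube CA and
// print the trust-store symlink the log creates from it.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout",
		"-in", "/usr/share/ca-certificates/minikubeCA.pem").Output()
	if err != nil {
		fmt.Println("openssl failed:", err)
		return
	}
	hash := strings.TrimSpace(string(out))
	// The trust store entry would then be /etc/ssl/certs/<hash>.0,
	// e.g. b5213941.0 in this run.
	fmt.Printf("ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/%s.0\n", hash)
}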
I0913 18:21:06.708737 11774 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0913 18:21:06.711449 11774 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0913 18:21:06.711490 11774 kubeadm.go:392] StartCluster: {Name:addons-315216 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726193793-19634@sha256:4434bf9c4c4590e602ea482d2337d9d858a3db898bec2a85c17f78c81593c44e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-315216 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0913 18:21:06.711611 11774 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0913 18:21:06.727237 11774 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0913 18:21:06.734660 11774 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0913 18:21:06.742046 11774 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0913 18:21:06.742083 11774 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0913 18:21:06.749198 11774 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0913 18:21:06.749211 11774 kubeadm.go:157] found existing configuration files:
I0913 18:21:06.749242 11774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0913 18:21:06.756257 11774 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0913 18:21:06.756304 11774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0913 18:21:06.762924 11774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0913 18:21:06.769838 11774 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0913 18:21:06.769887 11774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0913 18:21:06.776376 11774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0913 18:21:06.782968 11774 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0913 18:21:06.782998 11774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0913 18:21:06.789373 11774 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0913 18:21:06.795894 11774 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0913 18:21:06.795927 11774 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0913 18:21:06.802409 11774 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0913 18:21:06.833231 11774 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0913 18:21:06.833296 11774 kubeadm.go:310] [preflight] Running pre-flight checks
I0913 18:21:06.849891 11774 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0913 18:21:06.849971 11774 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1068-gcp
I0913 18:21:06.850018 11774 kubeadm.go:310] OS: Linux
I0913 18:21:06.850081 11774 kubeadm.go:310] CGROUPS_CPU: enabled
I0913 18:21:06.850127 11774 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0913 18:21:06.850165 11774 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0913 18:21:06.850209 11774 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0913 18:21:06.850300 11774 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0913 18:21:06.850387 11774 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0913 18:21:06.850427 11774 kubeadm.go:310] CGROUPS_PIDS: enabled
I0913 18:21:06.850469 11774 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0913 18:21:06.850512 11774 kubeadm.go:310] CGROUPS_BLKIO: enabled
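Editor's note: the CGROUPS_* preflight lines above correspond to the controllers the kernel reports as enabled. A minimal sketch (not kubeadm's checker) that lists them, assuming a Linux host exposing /proc/cgroups; cgroup v2-only hosts report controllers in /sys/fs/cgroup/cgroup.controllers instead:

// cgroup_check.go: print which cgroup controllers the kernel reports as enabled.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/cgroups")
	if err != nil {
		fmt.Println("open /proc/cgroups:", err)
		return
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		if strings.HasPrefix(line, "#") {
			continue // header: subsys_name hierarchy num_cgroups enabled
		}
		fields := strings.Fields(line)
		if len(fields) == 4 {
			state := "disabled"
			if fields[3] == "1" {
				state = "enabled"
			}
			fmt.Printf("CGROUPS_%s: %s\n", strings.ToUpper(fields[0]), state)
		}
	}
}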
I0913 18:21:06.895712 11774 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0913 18:21:06.895829 11774 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0913 18:21:06.895941 11774 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0913 18:21:06.905948 11774 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0913 18:21:06.909010 11774 out.go:235] - Generating certificates and keys ...
I0913 18:21:06.909094 11774 kubeadm.go:310] [certs] Using existing ca certificate authority
I0913 18:21:06.909172 11774 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0913 18:21:07.019266 11774 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0913 18:21:07.174731 11774 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0913 18:21:07.378516 11774 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0913 18:21:07.506797 11774 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0913 18:21:07.692384 11774 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0913 18:21:07.692561 11774 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-315216 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0913 18:21:07.777337 11774 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0913 18:21:07.777498 11774 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-315216 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0913 18:21:07.876125 11774 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0913 18:21:08.190849 11774 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0913 18:21:08.544927 11774 kubeadm.go:310] [certs] Generating "sa" key and public key
I0913 18:21:08.545003 11774 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0913 18:21:08.846251 11774 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0913 18:21:09.124086 11774 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0913 18:21:09.210796 11774 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0913 18:21:09.280743 11774 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0913 18:21:09.411368 11774 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0913 18:21:09.411856 11774 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0913 18:21:09.414006 11774 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0913 18:21:09.416285 11774 out.go:235] - Booting up control plane ...
I0913 18:21:09.416375 11774 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0913 18:21:09.416444 11774 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0913 18:21:09.416971 11774 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0913 18:21:09.425414 11774 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0913 18:21:09.430415 11774 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0913 18:21:09.430486 11774 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0913 18:21:09.505859 11774 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0913 18:21:09.505998 11774 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0913 18:21:10.007228 11774 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.46246ms
I0913 18:21:10.007355 11774 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0913 18:21:14.508844 11774 kubeadm.go:310] [api-check] The API server is healthy after 4.501572296s
I0913 18:21:14.519024 11774 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0913 18:21:14.527163 11774 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0913 18:21:14.542540 11774 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0913 18:21:14.542785 11774 kubeadm.go:310] [mark-control-plane] Marking the node addons-315216 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0913 18:21:14.548747 11774 kubeadm.go:310] [bootstrap-token] Using token: tuhy79.cpnl0w4vi8mb0vd6
I0913 18:21:14.549913 11774 out.go:235] - Configuring RBAC rules ...
I0913 18:21:14.550071 11774 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0913 18:21:14.552758 11774 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0913 18:21:14.557871 11774 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0913 18:21:14.559932 11774 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0913 18:21:14.562063 11774 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0913 18:21:14.564104 11774 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0913 18:21:14.914781 11774 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0913 18:21:15.332104 11774 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0913 18:21:15.915151 11774 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0913 18:21:15.915898 11774 kubeadm.go:310]
I0913 18:21:15.916003 11774 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0913 18:21:15.916016 11774 kubeadm.go:310]
I0913 18:21:15.916149 11774 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0913 18:21:15.916161 11774 kubeadm.go:310]
I0913 18:21:15.916196 11774 kubeadm.go:310] mkdir -p $HOME/.kube
I0913 18:21:15.916315 11774 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0913 18:21:15.916405 11774 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0913 18:21:15.916423 11774 kubeadm.go:310]
I0913 18:21:15.916508 11774 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0913 18:21:15.916517 11774 kubeadm.go:310]
I0913 18:21:15.916587 11774 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0913 18:21:15.916595 11774 kubeadm.go:310]
I0913 18:21:15.916668 11774 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0913 18:21:15.916775 11774 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0913 18:21:15.916870 11774 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0913 18:21:15.916893 11774 kubeadm.go:310]
I0913 18:21:15.916995 11774 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0913 18:21:15.917082 11774 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0913 18:21:15.917093 11774 kubeadm.go:310]
I0913 18:21:15.917200 11774 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token tuhy79.cpnl0w4vi8mb0vd6 \
I0913 18:21:15.917333 11774 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:ff52e83c10dc1cb593d25925eeb92aea4da9258f323e73d474dae3b251f98a2a \
I0913 18:21:15.917365 11774 kubeadm.go:310] --control-plane
I0913 18:21:15.917376 11774 kubeadm.go:310]
I0913 18:21:15.917508 11774 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0913 18:21:15.917523 11774 kubeadm.go:310]
I0913 18:21:15.917648 11774 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token tuhy79.cpnl0w4vi8mb0vd6 \
I0913 18:21:15.917809 11774 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:ff52e83c10dc1cb593d25925eeb92aea4da9258f323e73d474dae3b251f98a2a
I0913 18:21:15.919538 11774 kubeadm.go:310] W0913 18:21:06.830855 1922 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0913 18:21:15.919824 11774 kubeadm.go:310] W0913 18:21:06.831452 1922 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0913 18:21:15.920012 11774 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1068-gcp\n", err: exit status 1
I0913 18:21:15.920180 11774 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
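Editor's note: the --discovery-token-ca-cert-hash value in the join commands above is the SHA-256 of the cluster CA certificate's Subject Public Key Info. A minimal sketch (independent of minikube) of recomputing it; the CA path shown is the one this cluster uses and may differ elsewhere:

// discovery_hash.go: derive the kubeadm discovery hash from the cluster CA cert.
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("/var/lib/minikube/certs/ca.crt")
	if err != nil {
		fmt.Println("read ca.crt:", err)
		return
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse certificate:", err)
		return
	}
	// kubeadm hashes the DER-encoded Subject Public Key Info of the CA.
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	fmt.Printf("sha256:%x\n", sum[:])
}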
I0913 18:21:15.920206 11774 cni.go:84] Creating CNI manager for ""
I0913 18:21:15.920219 11774 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0913 18:21:15.921669 11774 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0913 18:21:15.922983 11774 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0913 18:21:15.930625 11774 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0913 18:21:15.945389 11774 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0913 18:21:15.945433 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:15.945467 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-315216 minikube.k8s.io/updated_at=2024_09_13T18_21_15_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=fdd33bebc6743cfd1c61ec7fe066add478610a92 minikube.k8s.io/name=addons-315216 minikube.k8s.io/primary=true
I0913 18:21:16.006268 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:16.006305 11774 ops.go:34] apiserver oom_adj: -16
I0913 18:21:16.506462 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:17.006456 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:17.506390 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:18.006856 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:18.506859 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:19.006412 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:19.506846 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:20.006430 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:20.506320 11774 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0913 18:21:20.569353 11774 kubeadm.go:1113] duration metric: took 4.623961565s to wait for elevateKubeSystemPrivileges
I0913 18:21:20.569380 11774 kubeadm.go:394] duration metric: took 13.857893144s to StartCluster
I0913 18:21:20.569400 11774 settings.go:142] acquiring lock: {Name:mkdfe968303b166acd0b853425fddda0e662ae88 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:20.569517 11774 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19636-3724/kubeconfig
I0913 18:21:20.569914 11774 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19636-3724/kubeconfig: {Name:mkb3de2d521cdfaf51b480e5ea10ef30c8160b52 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0913 18:21:20.570138 11774 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0913 18:21:20.570262 11774 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0913 18:21:20.570276 11774 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0913 18:21:20.570408 11774 addons.go:69] Setting yakd=true in profile "addons-315216"
I0913 18:21:20.570415 11774 addons.go:69] Setting inspektor-gadget=true in profile "addons-315216"
I0913 18:21:20.570431 11774 addons.go:234] Setting addon yakd=true in "addons-315216"
I0913 18:21:20.570443 11774 addons.go:234] Setting addon inspektor-gadget=true in "addons-315216"
I0913 18:21:20.570433 11774 addons.go:69] Setting storage-provisioner=true in profile "addons-315216"
I0913 18:21:20.570460 11774 addons.go:234] Setting addon storage-provisioner=true in "addons-315216"
I0913 18:21:20.570461 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570457 11774 addons.go:69] Setting volcano=true in profile "addons-315216"
I0913 18:21:20.570471 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570477 11774 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-315216"
I0913 18:21:20.570489 11774 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-315216"
I0913 18:21:20.570495 11774 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-315216"
I0913 18:21:20.570499 11774 addons.go:69] Setting registry=true in profile "addons-315216"
I0913 18:21:20.570503 11774 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-315216"
I0913 18:21:20.570510 11774 addons.go:234] Setting addon registry=true in "addons-315216"
I0913 18:21:20.570514 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570516 11774 addons.go:69] Setting metrics-server=true in profile "addons-315216"
I0913 18:21:20.570528 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570537 11774 addons.go:234] Setting addon metrics-server=true in "addons-315216"
I0913 18:21:20.570552 11774 addons.go:69] Setting volumesnapshots=true in profile "addons-315216"
I0913 18:21:20.570564 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570572 11774 addons.go:234] Setting addon volumesnapshots=true in "addons-315216"
I0913 18:21:20.570591 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570479 11774 addons.go:234] Setting addon volcano=true in "addons-315216"
I0913 18:21:20.570640 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.570820 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.570971 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.571011 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.571026 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.571050 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.571055 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.571189 11774 addons.go:69] Setting default-storageclass=true in profile "addons-315216"
I0913 18:21:20.571212 11774 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-315216"
I0913 18:21:20.571328 11774 addons.go:69] Setting cloud-spanner=true in profile "addons-315216"
I0913 18:21:20.571348 11774 addons.go:234] Setting addon cloud-spanner=true in "addons-315216"
I0913 18:21:20.571385 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.571485 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.571897 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.570490 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.572741 11774 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-315216"
I0913 18:21:20.572849 11774 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-315216"
I0913 18:21:20.572876 11774 addons.go:69] Setting ingress=true in profile "addons-315216"
I0913 18:21:20.572891 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.572898 11774 addons.go:234] Setting addon ingress=true in "addons-315216"
I0913 18:21:20.572932 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.573983 11774 out.go:177] * Verifying Kubernetes components...
I0913 18:21:20.570465 11774 config.go:182] Loaded profile config "addons-315216": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0913 18:21:20.575003 11774 addons.go:69] Setting ingress-dns=true in profile "addons-315216"
I0913 18:21:20.575020 11774 addons.go:234] Setting addon ingress-dns=true in "addons-315216"
I0913 18:21:20.575052 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.575356 11774 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0913 18:21:20.575372 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.572862 11774 addons.go:69] Setting gcp-auth=true in profile "addons-315216"
I0913 18:21:20.575425 11774 mustload.go:65] Loading cluster: addons-315216
I0913 18:21:20.601545 11774 addons.go:234] Setting addon default-storageclass=true in "addons-315216"
I0913 18:21:20.601582 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.601973 11774 config.go:182] Loaded profile config "addons-315216": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0913 18:21:20.602041 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.602115 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.602234 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.602271 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.602934 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.603262 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.604249 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.607578 11774 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-315216"
I0913 18:21:20.607622 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.608048 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:20.610729 11774 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0913 18:21:20.612581 11774 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0913 18:21:20.612603 11774 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0913 18:21:20.612662 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
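The cli_runner calls above and below use a Go template with docker container inspect -f to look up the host port that Docker mapped to the node container's SSH port (22/tcp); the sshutil lines that follow then dial 127.0.0.1 on that port. A rough way to reproduce the same lookup by hand, assuming the profile name used throughout this log, is:

    docker container inspect addons-315216 --format '{{.State.Status}}'                                            # e.g. "running"
    docker container inspect addons-315216 --format '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'  # e.g. 32768, matching the Port reported by sshutil below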
I0913 18:21:20.614579 11774 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0913 18:21:20.614579 11774 out.go:177] - Using image docker.io/registry:2.8.3
I0913 18:21:20.616642 11774 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0913 18:21:20.616664 11774 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0913 18:21:20.616727 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.620949 11774 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0913 18:21:20.622591 11774 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0913 18:21:20.622614 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0913 18:21:20.622638 11774 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0913 18:21:20.622660 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.623704 11774 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0913 18:21:20.623725 11774 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0913 18:21:20.623774 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.628386 11774 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0913 18:21:20.631097 11774 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0913 18:21:20.632280 11774 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0913 18:21:20.634785 11774 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0913 18:21:20.634807 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0913 18:21:20.634857 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.653297 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.660387 11774 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0913 18:21:20.661518 11774 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0913 18:21:20.665344 11774 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0913 18:21:20.665580 11774 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0913 18:21:20.665599 11774 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0913 18:21:20.665694 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.665842 11774 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0913 18:21:20.666768 11774 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0913 18:21:20.666792 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0913 18:21:20.666874 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.668703 11774 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0913 18:21:20.668723 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0913 18:21:20.668870 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.677520 11774 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0913 18:21:20.677717 11774 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0913 18:21:20.679180 11774 out.go:177] - Using image docker.io/busybox:stable
I0913 18:21:20.679250 11774 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0913 18:21:20.679262 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0913 18:21:20.679303 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.680243 11774 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0913 18:21:20.680263 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0913 18:21:20.680315 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.680603 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.685494 11774 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0913 18:21:20.686690 11774 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0913 18:21:20.687650 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.689807 11774 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0913 18:21:20.690391 11774 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.23
I0913 18:21:20.691485 11774 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0913 18:21:20.692490 11774 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0913 18:21:20.693103 11774 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0913 18:21:20.693119 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0913 18:21:20.693169 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.697714 11774 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0913 18:21:20.699795 11774 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0913 18:21:20.700847 11774 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0913 18:21:20.701758 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0913 18:21:20.701781 11774 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0913 18:21:20.701839 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.701883 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.702410 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.709500 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:20.717198 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.719913 11774 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0913 18:21:20.721391 11774 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0913 18:21:20.721410 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0913 18:21:20.721460 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.730894 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.732089 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.732102 11774 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0913 18:21:20.733210 11774 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0913 18:21:20.733233 11774 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0913 18:21:20.733288 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:20.735602 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.737183 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.746730 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.752534 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.755015 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:20.759601 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:21.000184 11774 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0913 18:21:21.000234 11774 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0913 18:21:21.097656 11774 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0913 18:21:21.097696 11774 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0913 18:21:21.099511 11774 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0913 18:21:21.099530 11774 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0913 18:21:21.113541 11774 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0913 18:21:21.113568 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0913 18:21:21.117247 11774 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0913 18:21:21.117270 11774 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0913 18:21:21.206385 11774 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0913 18:21:21.206413 11774 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0913 18:21:21.208540 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0913 18:21:21.209125 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0913 18:21:21.209147 11774 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0913 18:21:21.295028 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0913 18:21:21.295317 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0913 18:21:21.298881 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0913 18:21:21.306064 11774 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0913 18:21:21.306085 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0913 18:21:21.394313 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0913 18:21:21.395057 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0913 18:21:21.395523 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0913 18:21:21.396534 11774 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0913 18:21:21.396553 11774 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0913 18:21:21.400048 11774 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0913 18:21:21.400068 11774 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0913 18:21:21.405246 11774 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0913 18:21:21.405264 11774 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0913 18:21:21.417477 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0913 18:21:21.417567 11774 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0913 18:21:21.496972 11774 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0913 18:21:21.497000 11774 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0913 18:21:21.596153 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0913 18:21:21.715723 11774 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0913 18:21:21.715804 11774 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0913 18:21:21.716694 11774 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0913 18:21:21.716746 11774 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0913 18:21:21.803596 11774 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0913 18:21:21.803670 11774 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0913 18:21:21.806508 11774 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0913 18:21:21.806525 11774 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0913 18:21:21.813445 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0913 18:21:22.011664 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0913 18:21:22.011695 11774 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0913 18:21:22.095601 11774 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0913 18:21:22.095631 11774 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0913 18:21:22.497385 11774 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0913 18:21:22.497415 11774 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0913 18:21:22.516142 11774 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0913 18:21:22.516224 11774 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0913 18:21:22.594265 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0913 18:21:22.606708 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0913 18:21:22.606739 11774 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0913 18:21:22.699398 11774 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0913 18:21:22.699482 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0913 18:21:22.806777 11774 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.806505092s)
I0913 18:21:22.806871 11774 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
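The bash pipeline that just completed edits the coredns ConfigMap in place: it fetches the Corefile, uses sed to insert a hosts block mapping host.minikube.internal to 192.168.49.1 (plus a log directive), and feeds the result back through kubectl replace. One illustrative way to confirm the injected stanza, assuming the standard Corefile data key, is:

    kubectl --context addons-315216 -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'

which should show a block along the lines of:

    hosts {
       192.168.49.1 host.minikube.internal
       fallthrough
    }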
I0913 18:21:22.808152 11774 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.8079396s)
I0913 18:21:22.809102 11774 node_ready.go:35] waiting up to 6m0s for node "addons-315216" to be "Ready" ...
I0913 18:21:22.817613 11774 node_ready.go:49] node "addons-315216" has status "Ready":"True"
I0913 18:21:22.817634 11774 node_ready.go:38] duration metric: took 8.466753ms for node "addons-315216" to be "Ready" ...
I0913 18:21:22.817646 11774 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0913 18:21:22.909529 11774 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace to be "Ready" ...
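node_ready and pod_ready poll the API until the Ready condition reports True. A rough kubectl-level equivalent (illustrative only; not how the test itself polls) would be:

    kubectl --context addons-315216 wait --for=condition=Ready node/addons-315216 --timeout=6m
    kubectl --context addons-315216 -n kube-system wait --for=condition=Ready pod -l k8s-app=kube-dns --timeout=6m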
I0913 18:21:22.913215 11774 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0913 18:21:22.913281 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0913 18:21:22.997782 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0913 18:21:22.997866 11774 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0913 18:21:23.012764 11774 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0913 18:21:23.012846 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0913 18:21:23.114746 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0913 18:21:23.311433 11774 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-315216" context rescaled to 1 replicas
I0913 18:21:23.396145 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0913 18:21:23.415943 11774 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0913 18:21:23.415968 11774 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0913 18:21:23.511834 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.303259754s)
I0913 18:21:23.794546 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0913 18:21:24.009567 11774 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0913 18:21:24.009654 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0913 18:21:24.999241 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:25.400146 11774 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0913 18:21:25.400229 11774 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0913 18:21:25.695583 11774 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0913 18:21:25.695681 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0913 18:21:26.005534 11774 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0913 18:21:26.005559 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0913 18:21:26.603064 11774 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0913 18:21:26.603140 11774 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0913 18:21:26.907208 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0913 18:21:27.417155 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:27.796753 11774 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0913 18:21:27.796883 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:27.823889 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:28.594301 11774 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0913 18:21:28.714379 11774 addons.go:234] Setting addon gcp-auth=true in "addons-315216"
I0913 18:21:28.714482 11774 host.go:66] Checking if "addons-315216" exists ...
I0913 18:21:28.715031 11774 cli_runner.go:164] Run: docker container inspect addons-315216 --format={{.State.Status}}
I0913 18:21:28.734258 11774 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0913 18:21:28.734299 11774 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-315216
I0913 18:21:28.748760 11774 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19636-3724/.minikube/machines/addons-315216/id_rsa Username:docker}
I0913 18:21:29.414567 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (8.119500486s)
I0913 18:21:29.414709 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (8.119362945s)
I0913 18:21:29.414764 11774 addons.go:475] Verifying addon ingress=true in "addons-315216"
I0913 18:21:29.417004 11774 out.go:177] * Verifying ingress addon...
I0913 18:21:29.419653 11774 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0913 18:21:29.498488 11774 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0913 18:21:29.498567 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:29.919662 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:29.923441 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:30.500533 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:31.000341 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:31.598913 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:31.924367 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:32.302789 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (11.003859833s)
I0913 18:21:32.302954 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (10.908597259s)
I0913 18:21:32.303011 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (10.907936142s)
I0913 18:21:32.303198 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (10.907644072s)
I0913 18:21:32.303284 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (10.707048658s)
I0913 18:21:32.303341 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (10.489875349s)
I0913 18:21:32.303360 11774 addons.go:475] Verifying addon registry=true in "addons-315216"
I0913 18:21:32.303601 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.70923476s)
I0913 18:21:32.303721 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (9.188888773s)
I0913 18:21:32.303795 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (8.907546037s)
I0913 18:21:32.303833 11774 addons.go:475] Verifying addon metrics-server=true in "addons-315216"
I0913 18:21:32.303903 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (8.509321749s)
W0913 18:21:32.303935 11774 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0913 18:21:32.303953 11774 retry.go:31] will retry after 224.652163ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
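This failure is the usual CRD/CR ordering race: the VolumeSnapshotClass object is applied in the same kubectl invocation that creates the snapshot.storage.k8s.io CRDs, and the API server has not registered the new kind yet when the class is mapped, so the apply exits with status 1. Retrying after a short delay, as retry.go does above, resolves it; another common pattern, sketched here as an assumption rather than what minikube does, is to apply the CRDs first and wait for them to become Established before applying the class:

    kubectl apply -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
    kubectl wait --for=condition=Established crd/volumesnapshotclasses.snapshot.storage.k8s.io --timeout=60s
    kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml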
I0913 18:21:32.305090 11774 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-315216 service yakd-dashboard -n yakd-dashboard
I0913 18:21:32.305198 11774 out.go:177] * Verifying registry addon...
I0913 18:21:32.308136 11774 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
W0913 18:21:32.311988 11774 out.go:270] ! Enabling 'default-storageclass' returned an error: running callbacks: [Error making standard the default storage class: Error while marking storage class local-path as non-default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
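That warning is an optimistic-concurrency conflict: the addon read the local-path StorageClass, the object was updated in between (likely by the storage-provisioner-rancher addon being enabled concurrently), and the subsequent update was rejected because the resourceVersion was stale. A retry normally succeeds; when adjusting the default class by hand, a patch avoids the read-modify-write window entirely (illustrative command, not part of the test):

    kubectl --context addons-315216 patch storageclass local-path \
      -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'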
I0913 18:21:32.313154 11774 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0913 18:21:32.313209 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:32.419695 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:32.497263 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:32.529635 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0913 18:21:32.811790 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:33.006171 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:33.311535 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:33.426789 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:33.811916 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:33.819832 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (6.912494384s)
I0913 18:21:33.819888 11774 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-315216"
I0913 18:21:33.820106 11774 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (5.085810144s)
I0913 18:21:33.821541 11774 out.go:177] * Verifying csi-hostpath-driver addon...
I0913 18:21:33.821600 11774 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0913 18:21:33.822720 11774 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0913 18:21:33.823625 11774 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0913 18:21:33.823832 11774 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0913 18:21:33.823850 11774 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0913 18:21:33.904039 11774 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0913 18:21:33.904064 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:33.917802 11774 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0913 18:21:33.917826 11774 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0913 18:21:33.923230 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:34.005112 11774 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0913 18:21:34.005138 11774 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0913 18:21:34.100384 11774 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0913 18:21:34.312061 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:34.398003 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:34.423260 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:34.798826 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.269143586s)
I0913 18:21:34.812936 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:34.897457 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:34.917582 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:34.922870 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:35.314915 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:35.413771 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:35.419146 11774 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.318696529s)
I0913 18:21:35.421424 11774 addons.go:475] Verifying addon gcp-auth=true in "addons-315216"
I0913 18:21:35.423187 11774 out.go:177] * Verifying gcp-auth addon...
I0913 18:21:35.425314 11774 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0913 18:21:35.513108 11774 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
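kapi.go polls the gcp-auth namespace for pods carrying the kubernetes.io/minikube-addons=gcp-auth label; finding 0 pods here only means the webhook deployment has not been scheduled yet. The same check can be run by hand (illustrative, using the context name from this log):

    kubectl --context addons-315216 -n gcp-auth get pods -l kubernetes.io/minikube-addons=gcp-auth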
I0913 18:21:35.514446 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:35.811833 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:35.827965 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:35.915764 11774 pod_ready.go:98] pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:35 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.49.2 HostIPs:[{IP:192.168.49.2}] PodIP:10.244.0.3 PodIPs:[{IP:10.244.0.3}] StartTime:2024-09-13 18:21:21 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-09-13 18:21:23 +0000 UTC,FinishedAt:2024-09-13 18:21:34 +0000 UTC,ContainerID:docker://5ca960f2c2b21944355698ab2396f91ab5e7561023a354e921920997231a4b0f,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.3 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:9caabbf6238b189a65d0d6e6ac138de60d6a1c419e5a341fbbb7c78382559c6e ContainerID:docker://5ca960f2c2b21944355698ab2396f91ab5e7561023a354e921920997231a4b0f Started:0xc00098c6d0 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0xc001df07f0} {Name:kube-api-access-6cpc6 MountPath:/var/run/secrets/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0xc001df0800}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0913 18:21:35.915798 11774 pod_ready.go:82] duration metric: took 13.006229311s for pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace to be "Ready" ...
E0913 18:21:35.915812 11774 pod_ready.go:67] WaitExtra: waitPodCondition: pod "coredns-7c65d6cfc9-6hp86" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:35 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-13 18:21:21 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.49.2 HostIPs:[{IP:192.168.49.2}] PodIP:10.244.0.3 PodIPs:[{IP:10.244.0.3}] StartTime:2024-09-13 18:21:21 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-09-13 18:21:23 +0000 UTC,FinishedAt:2024-09-13 18:21:34 +0000 UTC,ContainerID:docker://5ca960f2c2b21944355698ab2396f91ab5e7561023a354e921920997231a4b0f,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.3 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:9caabbf6238b189a65d0d6e6ac138de60d6a1c419e5a341fbbb7c78382559c6e ContainerID:docker://5ca960f2c2b21944355698ab2396f91ab5e7561023a354e921920997231a4b0f Started:0xc00098c6d0 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0xc001df07f0} {Name:kube-api-access-6cpc6 MountPath:/var/run/secrets/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0xc001df0800}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0913 18:21:35.915823 11774 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace to be "Ready" ...
I0913 18:21:35.922638 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:36.312014 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:36.328305 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:36.423380 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:36.812286 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:36.827185 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:36.923030 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:37.312582 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:37.328938 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:37.423195 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:37.811764 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:37.827831 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:37.921294 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:37.923517 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:38.311884 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:38.328299 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:38.423274 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:38.811514 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:38.827260 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:38.922892 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:39.312529 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:39.327893 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:39.422899 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:39.811394 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:39.827763 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:39.923191 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:39.923659 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:40.311866 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:40.327862 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:40.423465 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:40.812317 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:40.827790 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:40.923115 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:41.311748 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:41.332333 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:41.422521 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:41.811711 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:41.827492 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:41.922754 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:42.311635 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:42.327257 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:42.420938 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:42.423259 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:42.812083 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:42.828807 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:42.923150 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:43.312092 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:43.328201 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:43.422787 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:43.812316 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:43.827446 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:43.923091 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:44.311183 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:44.327610 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:44.421849 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:44.422408 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:44.812385 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:44.827739 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:44.923038 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:45.316448 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:45.327583 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:45.423085 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:45.812210 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:45.828217 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:45.923346 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:46.312170 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:46.328139 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:46.423010 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:46.812384 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:46.827920 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:46.920626 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:46.923234 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:47.311375 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:47.327411 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:47.422896 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:47.812370 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:47.827866 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:47.922908 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:48.314929 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:48.419098 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:48.423541 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:48.811636 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:48.827303 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:48.922759 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:48.923463 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:49.312024 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:49.331408 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:49.423186 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:49.812036 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:49.828393 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:49.923124 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:50.314726 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:50.327943 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:50.423668 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:50.812013 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:50.827620 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:50.922564 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:51.312787 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:51.327792 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:51.421593 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:51.423714 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:51.811725 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:51.828099 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:51.923461 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:52.312108 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:52.328603 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:52.422942 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:52.812256 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:52.826518 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:52.923001 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:53.312640 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0913 18:21:53.328216 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:53.423353 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:53.811520 11774 kapi.go:107] duration metric: took 21.503379767s to wait for kubernetes.io/minikube-addons=registry ...
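The kapi.go:96 lines above show minikube polling a label selector in the kube-system namespace roughly every 500ms until the matching pods leave Pending, after which kapi.go:107 records the total wait. A rough manual equivalent, offered only as a sketch and not what the test harness actually runs (the 6m timeout is an assumption, not taken from this log):

kubectl --context addons-315216 -n kube-system wait \
  --for=condition=Ready pod \
  -l kubernetes.io/minikube-addons=registry \
  --timeout=6m
# The same form applies to the other selectors waited on below, e.g.
# kubernetes.io/minikube-addons=csi-hostpath-driver and
# kubernetes.io/minikube-addons=gcp-auth; app.kubernetes.io/name=ingress-nginx
# is typically deployed in the ingress-nginx namespace rather than kube-system.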
I0913 18:21:53.827552 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:54.000552 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:54.001550 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:54.328967 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:54.422891 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:54.828157 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:54.922795 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:55.327713 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:55.423148 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:55.828607 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:55.922867 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:56.328241 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:56.421773 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:56.423089 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:56.827926 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:56.927813 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:57.328200 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:57.423043 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:57.828804 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:57.924354 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:58.328609 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:58.422187 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:21:58.422904 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:58.828101 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:58.927866 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:59.408827 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:59.478301 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:21:59.828083 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:21:59.923103 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:00.328029 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:00.423426 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:00.828694 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:00.923292 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:00.926988 11774 pod_ready.go:103] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"False"
I0913 18:22:01.329441 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:01.429479 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:01.827812 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:01.923509 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:02.328129 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:02.423900 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:02.423983 11774 pod_ready.go:93] pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace has status "Ready":"True"
I0913 18:22:02.424001 11774 pod_ready.go:82] duration metric: took 26.508168678s for pod "coredns-7c65d6cfc9-nj2wd" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.424012 11774 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.428411 11774 pod_ready.go:93] pod "etcd-addons-315216" in "kube-system" namespace has status "Ready":"True"
I0913 18:22:02.428485 11774 pod_ready.go:82] duration metric: took 4.463179ms for pod "etcd-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.428507 11774 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.432686 11774 pod_ready.go:93] pod "kube-apiserver-addons-315216" in "kube-system" namespace has status "Ready":"True"
I0913 18:22:02.432705 11774 pod_ready.go:82] duration metric: took 4.183014ms for pod "kube-apiserver-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.432739 11774 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.436825 11774 pod_ready.go:93] pod "kube-controller-manager-addons-315216" in "kube-system" namespace has status "Ready":"True"
I0913 18:22:02.436844 11774 pod_ready.go:82] duration metric: took 4.096685ms for pod "kube-controller-manager-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.436855 11774 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-tfw7l" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.440526 11774 pod_ready.go:93] pod "kube-proxy-tfw7l" in "kube-system" namespace has status "Ready":"True"
I0913 18:22:02.440544 11774 pod_ready.go:82] duration metric: took 3.683ms for pod "kube-proxy-tfw7l" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.440553 11774 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.820128 11774 pod_ready.go:93] pod "kube-scheduler-addons-315216" in "kube-system" namespace has status "Ready":"True"
I0913 18:22:02.820150 11774 pod_ready.go:82] duration metric: took 379.591304ms for pod "kube-scheduler-addons-315216" in "kube-system" namespace to be "Ready" ...
I0913 18:22:02.820157 11774 pod_ready.go:39] duration metric: took 40.002499793s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0913 18:22:02.820175 11774 api_server.go:52] waiting for apiserver process to appear ...
I0913 18:22:02.820229 11774 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0913 18:22:02.830758 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:02.903719 11774 api_server.go:72] duration metric: took 42.333539309s to wait for apiserver process to appear ...
I0913 18:22:02.903749 11774 api_server.go:88] waiting for apiserver healthz status ...
I0913 18:22:02.903776 11774 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0913 18:22:02.909144 11774 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
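The healthz probe above is an HTTPS GET against the apiserver's /healthz endpoint at the node address. If you need to repeat it while debugging, a hedged equivalent (assuming you are on the machine that runs the addons-315216 container, since 192.168.49.2 is only reachable from there):

kubectl --context addons-315216 get --raw /healthz
# or, without going through kubeconfig (self-signed cert, hence -k):
# curl -k https://192.168.49.2:8443/healthz
# a healthy control plane answers with the literal string: ok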
I0913 18:22:02.910049 11774 api_server.go:141] control plane version: v1.31.1
I0913 18:22:02.910071 11774 api_server.go:131] duration metric: took 6.315438ms to wait for apiserver health ...
I0913 18:22:02.910078 11774 system_pods.go:43] waiting for kube-system pods to appear ...
I0913 18:22:02.923903 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:03.025508 11774 system_pods.go:59] 17 kube-system pods found
I0913 18:22:03.025550 11774 system_pods.go:61] "coredns-7c65d6cfc9-nj2wd" [fd21cc8a-bae9-439d-bd4b-88a00fc27467] Running
I0913 18:22:03.025563 11774 system_pods.go:61] "csi-hostpath-attacher-0" [be57972c-58d1-4808-8ff4-ca67190811c9] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0913 18:22:03.025571 11774 system_pods.go:61] "csi-hostpath-resizer-0" [58cef56b-414e-4c64-add2-32d236f09463] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0913 18:22:03.025583 11774 system_pods.go:61] "csi-hostpathplugin-6xs52" [5f3cdfcc-6d9c-4003-b6ad-4fa772f91e6b] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0913 18:22:03.025590 11774 system_pods.go:61] "etcd-addons-315216" [c7b8c5da-ed34-4550-88cb-b57283301f35] Running
I0913 18:22:03.025597 11774 system_pods.go:61] "kube-apiserver-addons-315216" [65bc5844-551a-424d-b9c4-f84ae4e5e50b] Running
I0913 18:22:03.025603 11774 system_pods.go:61] "kube-controller-manager-addons-315216" [1eaef35a-ef2c-45ba-964a-d4c711bf0a58] Running
I0913 18:22:03.025609 11774 system_pods.go:61] "kube-ingress-dns-minikube" [3c02732d-d3f3-4f79-890e-2843e593ca75] Running
I0913 18:22:03.025615 11774 system_pods.go:61] "kube-proxy-tfw7l" [7125370e-1597-42d4-b4a4-fa01e11f5ceb] Running
I0913 18:22:03.025624 11774 system_pods.go:61] "kube-scheduler-addons-315216" [7b24d0a6-8ae7-44b1-931b-542172178637] Running
I0913 18:22:03.025629 11774 system_pods.go:61] "metrics-server-84c5f94fbc-5r2xh" [9fd33765-d89a-4aca-9d83-b2ef709128c4] Running
I0913 18:22:03.025638 11774 system_pods.go:61] "nvidia-device-plugin-daemonset-tvp28" [845bb4f2-4781-45e0-a654-5b57584b3a70] Running
I0913 18:22:03.025643 11774 system_pods.go:61] "registry-66c9cd494c-cvtgf" [58047cca-a75a-41df-bc9f-a91dc5a547ca] Running
I0913 18:22:03.025650 11774 system_pods.go:61] "registry-proxy-f7ltj" [7dfd72e6-65ac-495b-9832-0e900d22d7e6] Running
I0913 18:22:03.025654 11774 system_pods.go:61] "snapshot-controller-56fcc65765-ntlvm" [41f0b45e-62eb-4d4b-9bee-ebfbe102b16e] Running
I0913 18:22:03.025660 11774 system_pods.go:61] "snapshot-controller-56fcc65765-vsk8h" [af53978d-78a7-4c1a-960b-e203fade05e4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0913 18:22:03.025669 11774 system_pods.go:61] "storage-provisioner" [74ceb6a0-d02e-4906-9c7b-1fff3590e1d4] Running
I0913 18:22:03.026393 11774 system_pods.go:74] duration metric: took 116.302133ms to wait for pod list to return data ...
I0913 18:22:03.026413 11774 default_sa.go:34] waiting for default service account to be created ...
I0913 18:22:03.219552 11774 default_sa.go:45] found service account: "default"
I0913 18:22:03.219576 11774 default_sa.go:55] duration metric: took 193.156753ms for default service account to be created ...
I0913 18:22:03.219585 11774 system_pods.go:116] waiting for k8s-apps to be running ...
I0913 18:22:03.328839 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:03.423119 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:03.425924 11774 system_pods.go:86] 17 kube-system pods found
I0913 18:22:03.425952 11774 system_pods.go:89] "coredns-7c65d6cfc9-nj2wd" [fd21cc8a-bae9-439d-bd4b-88a00fc27467] Running
I0913 18:22:03.425966 11774 system_pods.go:89] "csi-hostpath-attacher-0" [be57972c-58d1-4808-8ff4-ca67190811c9] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0913 18:22:03.425974 11774 system_pods.go:89] "csi-hostpath-resizer-0" [58cef56b-414e-4c64-add2-32d236f09463] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0913 18:22:03.425984 11774 system_pods.go:89] "csi-hostpathplugin-6xs52" [5f3cdfcc-6d9c-4003-b6ad-4fa772f91e6b] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0913 18:22:03.425993 11774 system_pods.go:89] "etcd-addons-315216" [c7b8c5da-ed34-4550-88cb-b57283301f35] Running
I0913 18:22:03.426002 11774 system_pods.go:89] "kube-apiserver-addons-315216" [65bc5844-551a-424d-b9c4-f84ae4e5e50b] Running
I0913 18:22:03.426011 11774 system_pods.go:89] "kube-controller-manager-addons-315216" [1eaef35a-ef2c-45ba-964a-d4c711bf0a58] Running
I0913 18:22:03.426020 11774 system_pods.go:89] "kube-ingress-dns-minikube" [3c02732d-d3f3-4f79-890e-2843e593ca75] Running
I0913 18:22:03.426028 11774 system_pods.go:89] "kube-proxy-tfw7l" [7125370e-1597-42d4-b4a4-fa01e11f5ceb] Running
I0913 18:22:03.426037 11774 system_pods.go:89] "kube-scheduler-addons-315216" [7b24d0a6-8ae7-44b1-931b-542172178637] Running
I0913 18:22:03.426042 11774 system_pods.go:89] "metrics-server-84c5f94fbc-5r2xh" [9fd33765-d89a-4aca-9d83-b2ef709128c4] Running
I0913 18:22:03.426051 11774 system_pods.go:89] "nvidia-device-plugin-daemonset-tvp28" [845bb4f2-4781-45e0-a654-5b57584b3a70] Running
I0913 18:22:03.426057 11774 system_pods.go:89] "registry-66c9cd494c-cvtgf" [58047cca-a75a-41df-bc9f-a91dc5a547ca] Running
I0913 18:22:03.426062 11774 system_pods.go:89] "registry-proxy-f7ltj" [7dfd72e6-65ac-495b-9832-0e900d22d7e6] Running
I0913 18:22:03.426067 11774 system_pods.go:89] "snapshot-controller-56fcc65765-ntlvm" [41f0b45e-62eb-4d4b-9bee-ebfbe102b16e] Running
I0913 18:22:03.426081 11774 system_pods.go:89] "snapshot-controller-56fcc65765-vsk8h" [af53978d-78a7-4c1a-960b-e203fade05e4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0913 18:22:03.426091 11774 system_pods.go:89] "storage-provisioner" [74ceb6a0-d02e-4906-9c7b-1fff3590e1d4] Running
I0913 18:22:03.426102 11774 system_pods.go:126] duration metric: took 206.509869ms to wait for k8s-apps to be running ...
I0913 18:22:03.426114 11774 system_svc.go:44] waiting for kubelet service to be running ....
I0913 18:22:03.426166 11774 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0913 18:22:03.440087 11774 system_svc.go:56] duration metric: took 13.963656ms WaitForService to wait for kubelet
I0913 18:22:03.440115 11774 kubeadm.go:582] duration metric: took 42.869940343s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0913 18:22:03.440138 11774 node_conditions.go:102] verifying NodePressure condition ...
I0913 18:22:03.619308 11774 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0913 18:22:03.619339 11774 node_conditions.go:123] node cpu capacity is 8
I0913 18:22:03.619353 11774 node_conditions.go:105] duration metric: took 179.209734ms to run NodePressure ...
I0913 18:22:03.619365 11774 start.go:241] waiting for startup goroutines ...
I0913 18:22:03.828211 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:03.923858 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:04.328752 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:04.428154 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:04.828836 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:04.923056 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:05.328604 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:05.428337 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:05.828362 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:05.924131 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:06.327943 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:06.428768 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:06.828271 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:06.928410 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:07.327650 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:07.423524 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:07.827283 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:07.928592 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:08.328624 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:08.423998 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:08.827766 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:08.923148 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:09.328852 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:09.423828 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:09.827795 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:09.923403 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:10.327890 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:10.423587 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:10.828165 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:10.925947 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:11.328365 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:11.423861 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:11.827639 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:11.923489 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:12.329584 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:12.423540 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:12.827696 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:12.922663 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:13.328168 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:13.423570 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:13.828623 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:13.924069 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:14.327833 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:14.428634 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:14.828009 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:14.923921 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:15.328565 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:15.428830 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:15.827459 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:15.924286 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:16.328226 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0913 18:22:16.423636 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:16.827512 11774 kapi.go:107] duration metric: took 43.003884236s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0913 18:22:16.923643 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:17.423081 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:17.923836 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:18.423478 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:18.923988 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:19.424151 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:19.924069 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:20.423554 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:20.923373 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:21.423731 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:21.923566 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:22.423431 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:22.923111 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:23.423436 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:23.923085 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:24.424168 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:24.923607 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:25.422931 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:25.923591 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:26.423759 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:26.923250 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:27.423759 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:27.924068 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:28.423908 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:28.923416 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:29.422743 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:29.922647 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:30.423280 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:30.924261 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:31.423189 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:31.923932 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:32.422814 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:32.923845 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:33.423434 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:33.924398 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:34.424657 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:34.924489 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:35.423970 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:35.924145 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:36.424452 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:36.923966 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:37.436679 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:37.930282 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:38.423777 11774 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0913 18:22:38.923494 11774 kapi.go:107] duration metric: took 1m9.503838891s to wait for app.kubernetes.io/name=ingress-nginx ...
I0913 18:22:57.928840 11774 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0913 18:22:57.928861 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:22:58.428475 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:22:58.928031 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:22:59.429012 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:22:59.928819 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:00.428737 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:00.928541 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:01.428440 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:01.928469 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:02.428264 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:02.928355 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:03.427759 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:03.928695 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:04.428282 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:04.928335 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:05.429025 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:05.928632 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:06.428227 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:06.927992 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:07.428790 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:07.929348 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:08.428028 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:08.928472 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:09.428110 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:09.928761 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:10.428651 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:10.928577 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:11.428307 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:11.928227 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:12.428978 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:12.928837 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:13.428742 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:13.928783 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:14.428536 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:14.928413 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:15.428720 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:15.928412 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:16.427896 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:16.928521 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:17.428557 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:17.928338 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:18.427873 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:18.928684 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:19.428257 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:19.928044 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:20.428665 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:20.928561 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:21.428624 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:21.928384 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:22.428129 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:22.927855 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:23.428433 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:23.928343 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:24.428018 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:24.928947 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:25.428623 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:25.928609 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:26.428525 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:26.928478 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:27.428662 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:27.928889 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:28.428907 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:28.927816 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:29.428629 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:29.928280 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:30.427776 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:30.928748 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:31.428023 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:31.929145 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:32.429134 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:32.928563 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:33.428047 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:33.928900 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:34.428343 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:34.928140 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:35.429006 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:35.929194 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:36.429265 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:36.927940 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:37.428796 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:37.928973 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:38.428449 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:38.928252 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:39.428197 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:39.929175 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:40.428781 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:40.928698 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:41.428090 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:41.929092 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:42.428788 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:42.928666 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:43.428325 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:43.928335 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:44.428677 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:44.928449 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:45.428157 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:45.928993 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:46.429559 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:46.928449 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:47.428421 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:47.928736 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:48.428168 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:48.927912 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:49.496278 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:49.928861 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:50.428353 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:50.928045 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:51.428831 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:51.928673 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:52.428324 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:52.928192 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:53.427895 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:53.928982 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:54.428786 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:54.928836 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:55.428524 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:55.928247 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:56.427809 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:56.928832 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:57.428639 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:57.928623 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:58.428680 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:58.928297 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:59.427978 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:23:59.929111 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:00.428966 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:00.928817 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:01.428920 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:01.928713 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:02.429379 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:02.928953 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:03.428772 11774 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0913 18:24:03.932958 11774 kapi.go:107] duration metric: took 2m28.507642144s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0913 18:24:03.934510 11774 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-315216 cluster.
I0913 18:24:03.935941 11774 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0913 18:24:03.937252 11774 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
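The two notes above amount to a short how-to; a minimal sketch of both, not taken from this run (the pod name and image are hypothetical placeholders, and the "true" value follows the addon's documented label usage; the log itself only names the gcp-auth-skip-secret key):

# opt a single pod out of the credential mount via the label
kubectl --context addons-315216 run no-gcp-creds \
  --image=gcr.io/k8s-minikube/busybox \
  --labels="gcp-auth-skip-secret=true" \
  --restart=Never -- sleep 3600

# re-run the addon so pods created before it was enabled pick up the mount
minikube -p addons-315216 addons enable gcp-auth --refresh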
I0913 18:24:03.938751 11774 out.go:177] * Enabled addons: cloud-spanner, storage-provisioner, volcano, nvidia-device-plugin, ingress-dns, inspektor-gadget, metrics-server, yakd, storage-provisioner-rancher, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0913 18:24:03.940012 11774 addons.go:510] duration metric: took 2m43.369745411s for enable addons: enabled=[cloud-spanner storage-provisioner volcano nvidia-device-plugin ingress-dns inspektor-gadget metrics-server yakd storage-provisioner-rancher volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0913 18:24:03.940054 11774 start.go:246] waiting for cluster config update ...
I0913 18:24:03.940080 11774 start.go:255] writing updated cluster config ...
I0913 18:24:03.940339 11774 ssh_runner.go:195] Run: rm -f paused
I0913 18:24:03.987557 11774 start.go:600] kubectl: 1.31.0, cluster: 1.31.1 (minor skew: 0)
I0913 18:24:03.989066 11774 out.go:177] * Done! kubectl is now configured to use "addons-315216" cluster and "default" namespace by default
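A minimal sketch of the two gcp-auth hints printed above, assuming the addons-315216 profile from this log; the pod name and the label value are illustrative (the message above only names the gcp-auth-skip-secret key):

  # Opt a single pod out of credential mounting (pod name and label value assumed):
  kubectl --context addons-315216 label pod my-pod gcp-auth-skip-secret=true

  # Re-mount credentials into pods that existed before the addon was enabled:
  out/minikube-linux-amd64 -p addons-315216 addons enable gcp-auth --refresh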
==> Docker <==
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.114665735Z" level=info msg="ignoring event" container=e730bad4eddfd7fb61660e09ab8137d98aa942330f61f66d46c582668394c761 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.195305005Z" level=info msg="ignoring event" container=7c5d10e708e012586e28c5d7200eb2152c2e0a2fd57814fcecabddd014b32a48 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.196097947Z" level=info msg="ignoring event" container=cf61891d73b2104be45270a6ba0b802f886583a164958e949b07341a92218472 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.198427008Z" level=info msg="ignoring event" container=dc0098d012d2d3e6ca87eff20568cfb85c46a2523a9584158a87cdbfca02a427 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.212268222Z" level=info msg="ignoring event" container=f851c23625d4d68da50faabfd3a76baa1ae03df2e65cf5d6884150c569390476 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 cri-dockerd[1603]: time="2024-09-13T18:33:41Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"csi-hostpath-attacher-0_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.444842210Z" level=info msg="ignoring event" container=2d973abe7158676f97e3fb2d3334d4e481fd46dabbc7bcee62762437547831ee module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.444889078Z" level=info msg="ignoring event" container=6f5c559beae39d16452a4da942e086b4a679558a678dc38e54196eefc5fd99ac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:41 addons-315216 dockerd[1338]: time="2024-09-13T18:33:41.494139502Z" level=info msg="ignoring event" container=78419849c7facdbe3ce7e508513b1590bc833d2df12b70e84b09470401005ed2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:47 addons-315216 dockerd[1338]: time="2024-09-13T18:33:47.411872476Z" level=info msg="ignoring event" container=6848075bd0d508d26582ee4fb1b1c4ada191ba9ff66dd6575d248a64aac3097f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:47 addons-315216 dockerd[1338]: time="2024-09-13T18:33:47.412646493Z" level=info msg="ignoring event" container=e2fdf049e04be2fd30424ae2c5bd79d81da97f7d3dcdfbde3fb6f66af1a327fc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:47 addons-315216 dockerd[1338]: time="2024-09-13T18:33:47.529143660Z" level=info msg="ignoring event" container=d51d26561449424d32a70227cfd0dcb41ddcec5a2e7dd4921d3ce6c0538e6569 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:47 addons-315216 cri-dockerd[1603]: time="2024-09-13T18:33:47Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"snapshot-controller-56fcc65765-vsk8h_kube-system\": unexpected command output nsenter: cannot open /proc/4895/ns/net: No such file or directory\n with error: exit status 1"
Sep 13 18:33:47 addons-315216 dockerd[1338]: time="2024-09-13T18:33:47.560498656Z" level=info msg="ignoring event" container=7f013354760312ca2ffcd868d797a8cec827b4839470ec0992f6b03b19567672 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:50 addons-315216 dockerd[1338]: time="2024-09-13T18:33:50.646989223Z" level=info msg="Container failed to exit within 30s of signal 15 - using the force" container=3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab
Sep 13 18:33:50 addons-315216 dockerd[1338]: time="2024-09-13T18:33:50.666846765Z" level=info msg="ignoring event" container=3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:50 addons-315216 dockerd[1338]: time="2024-09-13T18:33:50.787351635Z" level=info msg="ignoring event" container=a00ad65edb70a6c7fc9cca7a5aeabd324dc28adfc15eb3c204908118a6438321 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:55 addons-315216 dockerd[1338]: time="2024-09-13T18:33:55.124834694Z" level=info msg="ignoring event" container=f75e3d31f40ea6f09a03d467bb467771387d6ae116de584d6d8de2aaf952e55f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:55 addons-315216 cri-dockerd[1603]: time="2024-09-13T18:33:55Z" level=error msg="error getting RW layer size for container ID '3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab': Error response from daemon: No such container: 3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab"
Sep 13 18:33:55 addons-315216 cri-dockerd[1603]: time="2024-09-13T18:33:55Z" level=error msg="Set backoffDuration to : 1m0s for container ID '3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab'"
Sep 13 18:33:55 addons-315216 dockerd[1338]: time="2024-09-13T18:33:55.606474918Z" level=info msg="ignoring event" container=09c283ca052c8d8f8ac7439d6bcbbef27e851b72f8c2f976ccb6214a8335f2b8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:55 addons-315216 dockerd[1338]: time="2024-09-13T18:33:55.669783905Z" level=info msg="ignoring event" container=a9c1d39517cc60fb6d08fe5da04a0d2d8c507457c8e761d1465b185c411788ff module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:55 addons-315216 cri-dockerd[1603]: time="2024-09-13T18:33:55Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"registry-66c9cd494c-cvtgf_kube-system\": unexpected command output nsenter: cannot open /proc/3808/ns/net: No such file or directory\n with error: exit status 1"
Sep 13 18:33:55 addons-315216 dockerd[1338]: time="2024-09-13T18:33:55.751124777Z" level=info msg="ignoring event" container=ed8ef66e14f9f2445129d2a9231ef5ad8ce6310b62bc37b87b17e442c698c5bb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 13 18:33:55 addons-315216 dockerd[1338]: time="2024-09-13T18:33:55.816006917Z" level=info msg="ignoring event" container=4228b78f9cbe6d607372551f7d4bacb06ffe8a6c97b96940a48405acdad25c6c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
341cbdd33017a a416a98b71e22 36 seconds ago Exited helper-pod 0 e88dbbbc27eb8 helper-pod-delete-pvc-2743739b-b850-4f4a-8324-d1e883dc17e5
b755e69b74345 busybox@sha256:c230832bd3b0be59a6c47ed64294f9ce71e91b327957920b6929a0caa8353140 39 seconds ago Exited busybox 0 cd6890e3c93d8 test-local-path
6eeed28aac9c1 busybox@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79 43 seconds ago Exited helper-pod 0 c1608097129fa helper-pod-create-pvc-2743739b-b850-4f4a-8324-d1e883dc17e5
8f79e6d5fe2e1 kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 45 seconds ago Running hello-world-app 0 39794026d32fc hello-world-app-55bf9c44b4-s2vv8
469485a0cc670 nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 53 seconds ago Running nginx 0 0da52d43730a0 nginx
8a279ca7ee383 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 2ebf1e9d2262f gcp-auth-89d5ffd79-2nz72
a9c1d39517cc6 gcr.io/k8s-minikube/kube-registry-proxy@sha256:b3fa0b2df8737fdb85ad5918a7e2652527463e357afff83a5e5bb966bcedc367 12 minutes ago Exited registry-proxy 0 4228b78f9cbe6 registry-proxy-f7ltj
09c283ca052c8 registry@sha256:ac0192b549007e22998eb74e8d8488dcfe70f1489520c3b144a6047ac5efbe90 12 minutes ago Exited registry 0 ed8ef66e14f9f registry-66c9cd494c-cvtgf
a9ce8589ddeaa 6e38f40d628db 12 minutes ago Running storage-provisioner 0 09486a9e994bb storage-provisioner
544a45c4cd5b9 c69fa2e9cbf5f 12 minutes ago Running coredns 0 1f5ef07f6fd07 coredns-7c65d6cfc9-nj2wd
0308e52ea005b 60c005f310ff3 12 minutes ago Running kube-proxy 0 346d1f687f17f kube-proxy-tfw7l
67ee7bebe548c 2e96e5913fc06 12 minutes ago Running etcd 0 0cc635df3d092 etcd-addons-315216
40d4f39434b28 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 87f22ce877b01 kube-controller-manager-addons-315216
8a77b7004be60 9aa1fad941575 12 minutes ago Running kube-scheduler 0 042932805c58e kube-scheduler-addons-315216
1513e7b25742a 6bab7719df100 12 minutes ago Running kube-apiserver 0 7b03ca6f9b68b kube-apiserver-addons-315216
==> coredns [544a45c4cd5b] <==
[INFO] 10.244.0.21:50623 - 54815 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005236642s
[INFO] 10.244.0.21:33573 - 52588 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.005465878s
[INFO] 10.244.0.21:59053 - 26920 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.005074627s
[INFO] 10.244.0.21:56063 - 29935 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.001307241s
[INFO] 10.244.0.21:55934 - 45252 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.004258563s
[INFO] 10.244.0.21:50623 - 30256 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005750034s
[INFO] 10.244.0.21:50623 - 39039 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000081157s
[INFO] 10.244.0.21:33573 - 62657 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004918161s
[INFO] 10.244.0.21:59053 - 51155 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003282374s
[INFO] 10.244.0.21:55934 - 42925 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004312214s
[INFO] 10.244.0.21:56063 - 61286 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005186376s
[INFO] 10.244.0.21:56063 - 12933 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003175245s
[INFO] 10.244.0.21:33573 - 48080 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003357526s
[INFO] 10.244.0.21:55934 - 21435 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003594358s
[INFO] 10.244.0.21:59053 - 22280 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.001570503s
[INFO] 10.244.0.21:59053 - 58034 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00006771s
[INFO] 10.244.0.21:56063 - 595 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000158883s
[INFO] 10.244.0.21:55934 - 49891 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000131037s
[INFO] 10.244.0.21:33573 - 63494 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000106943s
[INFO] 10.244.0.21:58133 - 50298 "AAAA IN hello-world-app.default.svc.cluster.local.us-east4-a.c.k8s-minikube.internal. udp 94 false 512" NXDOMAIN qr,rd,ra 94 0.004874632s
[INFO] 10.244.0.21:58133 - 15964 "A IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.004980408s
[INFO] 10.244.0.21:58133 - 8754 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.079609181s
[INFO] 10.244.0.21:58133 - 23877 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004613704s
[INFO] 10.244.0.21:58133 - 44057 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004799811s
[INFO] 10.244.0.21:58133 - 6483 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000133819s
==> describe nodes <==
Name: addons-315216
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-315216
kubernetes.io/os=linux
minikube.k8s.io/commit=fdd33bebc6743cfd1c61ec7fe066add478610a92
minikube.k8s.io/name=addons-315216
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_13T18_21_15_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-315216
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 13 Sep 2024 18:21:13 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-315216
AcquireTime: <unset>
RenewTime: Fri, 13 Sep 2024 18:33:50 +0000
Conditions:
  Type             Status  LastHeartbeatTime                LastTransitionTime               Reason                      Message
  ----             ------  -----------------                ------------------               ------                      -------
  MemoryPressure   False   Fri, 13 Sep 2024 18:33:49 +0000  Fri, 13 Sep 2024 18:21:11 +0000  KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Fri, 13 Sep 2024 18:33:49 +0000  Fri, 13 Sep 2024 18:21:11 +0000  KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Fri, 13 Sep 2024 18:33:49 +0000  Fri, 13 Sep 2024 18:21:11 +0000  KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Fri, 13 Sep 2024 18:33:49 +0000  Fri, 13 Sep 2024 18:21:13 +0000  KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-315216
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859320Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859320Ki
pods: 110
System Info:
Machine ID: 20bec4f7953f4f749c6214db2c7a3ff3
System UUID: 36576c8a-8490-4e74-9790-97effe6fd933
Boot ID: 0dbba4ce-a2df-496d-8af2-30d1b719613b
Kernel Version: 5.15.0-1068-gcp
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.2.1
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
  Namespace    Name                                   CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                   ------------  ----------  ---------------  -------------  ---
  default      busybox                                0 (0%)        0 (0%)      0 (0%)           0 (0%)         9m14s
  default      hello-world-app-55bf9c44b4-s2vv8       0 (0%)        0 (0%)      0 (0%)           0 (0%)         46s
  default      nginx                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         55s
  gcp-auth     gcp-auth-89d5ffd79-2nz72               0 (0%)        0 (0%)      0 (0%)           0 (0%)         10m
  kube-system  coredns-7c65d6cfc9-nj2wd               100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     12m
  kube-system  etcd-addons-315216                     100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         12m
  kube-system  kube-apiserver-addons-315216           250m (3%)     0 (0%)      0 (0%)           0 (0%)         12m
  kube-system  kube-controller-manager-addons-315216  200m (2%)     0 (0%)      0 (0%)           0 (0%)         12m
  kube-system  kube-proxy-tfw7l                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
  kube-system  kube-scheduler-addons-315216           100m (1%)     0 (0%)      0 (0%)           0 (0%)         12m
  kube-system  storage-provisioner                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                750m (9%)   0 (0%)
  memory             170Mi (0%)  170Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type     Reason                   Age   From             Message
  ----     ------                   ----  ----             -------
  Normal   Starting                 12m   kube-proxy
  Normal   Starting                 12m   kubelet          Starting kubelet.
  Warning  CgroupV1                 12m   kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
  Normal   NodeAllocatableEnforced  12m   kubelet          Updated Node Allocatable limit across pods
  Normal   NodeHasSufficientMemory  12m   kubelet          Node addons-315216 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    12m   kubelet          Node addons-315216 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     12m   kubelet          Node addons-315216 status is now: NodeHasSufficientPID
  Normal   RegisteredNode           12m   node-controller  Node addons-315216 event: Registered Node addons-315216 in Controller
==> dmesg <==
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 62 d0 3e db cf d6 08 06
[ +1.429097] IPv4: martian source 10.244.0.1 from 10.244.0.17, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 92 89 50 b4 21 9e 08 06
[ +5.154670] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 26 ad 20 80 b0 8d 08 06
[ +0.513317] IPv4: martian source 10.244.0.1 from 10.244.0.18, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 2e ba 8c be bd 7c 08 06
[ +0.384664] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff ee e9 68 d1 4a cb 08 06
[ +24.413459] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 56 42 49 f1 83 41 08 06
[ +1.014545] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 82 e9 6c 93 06 21 08 06
[Sep13 18:23] IPv4: martian source 10.244.0.1 from 10.244.0.23, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 0a bb 85 00 22 a5 08 06
[ +0.288265] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff f6 b2 49 14 0c e1 08 06
[Sep13 18:24] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff 86 13 49 36 5b 1b 08 06
[ +0.000479] IPv4: martian source 10.244.0.25 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff ea c0 87 ff 69 0f 08 06
[Sep13 18:33] IPv4: martian source 10.244.0.30 from 10.244.0.21, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 56 42 49 f1 83 41 08 06
[ +1.693899] IPv4: martian source 10.244.0.21 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff ea c0 87 ff 69 0f 08 06
==> etcd [67ee7bebe548] <==
{"level":"info","ts":"2024-09-13T18:21:10.924151Z","caller":"embed/etcd.go:279","msg":"now serving peer/client/metrics","local-member-id":"aec36adc501070cc","initial-advertise-peer-urls":["https://192.168.49.2:2380"],"listen-peer-urls":["https://192.168.49.2:2380"],"advertise-client-urls":["https://192.168.49.2:2379"],"listen-client-urls":["https://127.0.0.1:2379","https://192.168.49.2:2379"],"listen-metrics-urls":["http://127.0.0.1:2381"]}
{"level":"info","ts":"2024-09-13T18:21:10.924180Z","caller":"embed/etcd.go:870","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2024-09-13T18:21:11.214394Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2024-09-13T18:21:11.214430Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-09-13T18:21:11.214444Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-09-13T18:21:11.214469Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-09-13T18:21:11.214477Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-13T18:21:11.214489Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-13T18:21:11.214507Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-13T18:21:11.215628Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-315216 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-13T18:21:11.215694Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-13T18:21:11.215710Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-13T18:21:11.215815Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-13T18:21:11.215860Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-13T18:21:11.215826Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-13T18:21:11.216648Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-13T18:21:11.216723Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-13T18:21:11.216742Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-13T18:21:11.216776Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-13T18:21:11.216885Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-13T18:21:11.217730Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-13T18:21:11.218046Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-09-13T18:31:11.622452Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1863}
{"level":"info","ts":"2024-09-13T18:31:11.644836Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1863,"took":"21.868643ms","hash":687279061,"current-db-size-bytes":9109504,"current-db-size":"9.1 MB","current-db-size-in-use-bytes":4927488,"current-db-size-in-use":"4.9 MB"}
{"level":"info","ts":"2024-09-13T18:31:11.644875Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":687279061,"revision":1863,"compact-revision":-1}
==> gcp-auth [8a279ca7ee38] <==
2024/09/13 18:24:42 Ready to write response ...
2024/09/13 18:24:42 Ready to marshal response ...
2024/09/13 18:24:42 Ready to write response ...
2024/09/13 18:32:45 Ready to marshal response ...
2024/09/13 18:32:45 Ready to write response ...
2024/09/13 18:32:45 Ready to marshal response ...
2024/09/13 18:32:45 Ready to write response ...
2024/09/13 18:32:45 Ready to marshal response ...
2024/09/13 18:32:45 Ready to write response ...
2024/09/13 18:32:55 Ready to marshal response ...
2024/09/13 18:32:55 Ready to write response ...
2024/09/13 18:33:01 Ready to marshal response ...
2024/09/13 18:33:01 Ready to write response ...
2024/09/13 18:33:02 Ready to marshal response ...
2024/09/13 18:33:02 Ready to write response ...
2024/09/13 18:33:10 Ready to marshal response ...
2024/09/13 18:33:10 Ready to write response ...
2024/09/13 18:33:11 Ready to marshal response ...
2024/09/13 18:33:11 Ready to write response ...
2024/09/13 18:33:11 Ready to marshal response ...
2024/09/13 18:33:11 Ready to write response ...
2024/09/13 18:33:20 Ready to marshal response ...
2024/09/13 18:33:20 Ready to write response ...
2024/09/13 18:33:32 Ready to marshal response ...
2024/09/13 18:33:32 Ready to write response ...
==> kernel <==
18:33:56 up 16 min, 0 users, load average: 1.78, 0.70, 0.46
Linux addons-315216 5.15.0-1068-gcp #76~20.04.1-Ubuntu SMP Tue Aug 20 15:52:45 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kube-apiserver [1513e7b25742] <==
W0913 18:24:34.716093 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0913 18:24:35.007862 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0913 18:24:35.300810 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0913 18:32:45.440187 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.107.207.45"}
I0913 18:32:50.241279 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0913 18:32:51.295851 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0913 18:33:01.357605 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0913 18:33:01.517874 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.103.74.166"}
I0913 18:33:03.132340 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
I0913 18:33:10.983415 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.100.6.12"}
I0913 18:33:10.992098 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
E0913 18:33:36.126991 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
I0913 18:33:47.284129 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0913 18:33:47.284184 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0913 18:33:47.296075 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0913 18:33:47.296122 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0913 18:33:47.297704 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0913 18:33:47.297746 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0913 18:33:47.307940 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0913 18:33:47.307985 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0913 18:33:47.318327 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0913 18:33:47.318364 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0913 18:33:48.298691 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0913 18:33:48.319299 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0913 18:33:48.428542 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
==> kube-controller-manager [40d4f39434b2] <==
E0913 18:33:48.299997 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
E0913 18:33:48.320381 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
E0913 18:33:48.429768 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0913 18:33:49.256808 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:49.256849 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0913 18:33:49.525874 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:49.525922 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0913 18:33:49.555479 1 range_allocator.go:241] "Successfully synced" logger="node-ipam-controller" key="addons-315216"
W0913 18:33:49.862563 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:49.862598 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0913 18:33:50.508427 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0913 18:33:50.508468 1 shared_informer.go:320] Caches are synced for resource quota
W0913 18:33:50.530337 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:50.530372 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0913 18:33:50.701794 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0913 18:33:50.701842 1 shared_informer.go:320] Caches are synced for garbage collector
W0913 18:33:52.015861 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:52.015899 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0913 18:33:52.076499 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:52.076533 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0913 18:33:52.100825 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:52.100854 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0913 18:33:52.110975 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0913 18:33:52.111002 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0913 18:33:55.550003 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="7.918µs"
==> kube-proxy [0308e52ea005] <==
I0913 18:21:24.217785 1 server_linux.go:66] "Using iptables proxy"
I0913 18:21:24.706405 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0913 18:21:24.706483 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0913 18:21:24.909031 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0913 18:21:24.909089 1 server_linux.go:169] "Using iptables Proxier"
I0913 18:21:24.999429 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0913 18:21:24.999841 1 server.go:483] "Version info" version="v1.31.1"
I0913 18:21:24.999866 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0913 18:21:25.003344 1 config.go:199] "Starting service config controller"
I0913 18:21:25.003382 1 shared_informer.go:313] Waiting for caches to sync for service config
I0913 18:21:25.003415 1 config.go:105] "Starting endpoint slice config controller"
I0913 18:21:25.003421 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0913 18:21:25.003892 1 config.go:328] "Starting node config controller"
I0913 18:21:25.003903 1 shared_informer.go:313] Waiting for caches to sync for node config
I0913 18:21:25.105363 1 shared_informer.go:320] Caches are synced for node config
I0913 18:21:25.105413 1 shared_informer.go:320] Caches are synced for service config
I0913 18:21:25.105444 1 shared_informer.go:320] Caches are synced for endpoint slice config
==> kube-scheduler [8a77b7004be6] <==
E0913 18:21:13.112176 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0913 18:21:13.111157 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0913 18:21:13.112194 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
E0913 18:21:13.112278 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0913 18:21:13.111251 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0913 18:21:13.112312 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0913 18:21:13.111270 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0913 18:21:13.112349 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0913 18:21:13.111282 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0913 18:21:13.112226 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
E0913 18:21:13.112385 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
E0913 18:21:13.112282 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0913 18:21:13.925807 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0913 18:21:13.925845 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0913 18:21:14.042099 1 reflector.go:561] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0913 18:21:14.042348 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0913 18:21:14.093144 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0913 18:21:14.093187 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0913 18:21:14.137282 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0913 18:21:14.137316 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0913 18:21:14.144344 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0913 18:21:14.144384 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0913 18:21:14.251764 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0913 18:21:14.251812 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
I0913 18:21:16.509959 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 13 18:33:49 addons-315216 kubelet[2446]: I0913 18:33:49.225520 2446 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af53978d-78a7-4c1a-960b-e203fade05e4" path="/var/lib/kubelet/pods/af53978d-78a7-4c1a-960b-e203fade05e4/volumes"
Sep 13 18:33:50 addons-315216 kubelet[2446]: E0913 18:33:50.219972 2446 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="cc20ad18-6fce-408c-8205-eb67a03c9a44"
Sep 13 18:33:50 addons-315216 kubelet[2446]: I0913 18:33:50.959534 2446 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd053b82-587a-4124-a7ff-31feeca967dc-config-volume\") pod \"dd053b82-587a-4124-a7ff-31feeca967dc\" (UID: \"dd053b82-587a-4124-a7ff-31feeca967dc\") "
Sep 13 18:33:50 addons-315216 kubelet[2446]: I0913 18:33:50.959585 2446 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79fxs\" (UniqueName: \"kubernetes.io/projected/dd053b82-587a-4124-a7ff-31feeca967dc-kube-api-access-79fxs\") pod \"dd053b82-587a-4124-a7ff-31feeca967dc\" (UID: \"dd053b82-587a-4124-a7ff-31feeca967dc\") "
Sep 13 18:33:50 addons-315216 kubelet[2446]: I0913 18:33:50.959953 2446 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd053b82-587a-4124-a7ff-31feeca967dc-config-volume" (OuterVolumeSpecName: "config-volume") pod "dd053b82-587a-4124-a7ff-31feeca967dc" (UID: "dd053b82-587a-4124-a7ff-31feeca967dc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Sep 13 18:33:50 addons-315216 kubelet[2446]: I0913 18:33:50.961271 2446 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd053b82-587a-4124-a7ff-31feeca967dc-kube-api-access-79fxs" (OuterVolumeSpecName: "kube-api-access-79fxs") pod "dd053b82-587a-4124-a7ff-31feeca967dc" (UID: "dd053b82-587a-4124-a7ff-31feeca967dc"). InnerVolumeSpecName "kube-api-access-79fxs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 13 18:33:51 addons-315216 kubelet[2446]: I0913 18:33:51.060630 2446 reconciler_common.go:288] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd053b82-587a-4124-a7ff-31feeca967dc-config-volume\") on node \"addons-315216\" DevicePath \"\""
Sep 13 18:33:51 addons-315216 kubelet[2446]: I0913 18:33:51.060663 2446 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-79fxs\" (UniqueName: \"kubernetes.io/projected/dd053b82-587a-4124-a7ff-31feeca967dc-kube-api-access-79fxs\") on node \"addons-315216\" DevicePath \"\""
Sep 13 18:33:51 addons-315216 kubelet[2446]: I0913 18:33:51.648876 2446 scope.go:117] "RemoveContainer" containerID="3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab"
Sep 13 18:33:51 addons-315216 kubelet[2446]: I0913 18:33:51.662575 2446 scope.go:117] "RemoveContainer" containerID="3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab"
Sep 13 18:33:51 addons-315216 kubelet[2446]: E0913 18:33:51.663209 2446 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab" containerID="3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab"
Sep 13 18:33:51 addons-315216 kubelet[2446]: I0913 18:33:51.663241 2446 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab"} err="failed to get container status \"3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab\": rpc error: code = Unknown desc = Error response from daemon: No such container: 3005df670852e05dfec0a608f9fb5e529fe3817074331f2268b2b0b1020d4dab"
Sep 13 18:33:53 addons-315216 kubelet[2446]: I0913 18:33:53.224596 2446 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd053b82-587a-4124-a7ff-31feeca967dc" path="/var/lib/kubelet/pods/dd053b82-587a-4124-a7ff-31feeca967dc/volumes"
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.285434 2446 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8cpc\" (UniqueName: \"kubernetes.io/projected/e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5-kube-api-access-m8cpc\") pod \"e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5\" (UID: \"e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5\") "
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.285483 2446 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5-gcp-creds\") pod \"e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5\" (UID: \"e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5\") "
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.285555 2446 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5" (UID: "e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.287088 2446 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5-kube-api-access-m8cpc" (OuterVolumeSpecName: "kube-api-access-m8cpc") pod "e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5" (UID: "e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5"). InnerVolumeSpecName "kube-api-access-m8cpc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.386331 2446 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-m8cpc\" (UniqueName: \"kubernetes.io/projected/e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5-kube-api-access-m8cpc\") on node \"addons-315216\" DevicePath \"\""
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.386364 2446 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/e8a2ab5b-f9f4-47b1-9797-7023fd89a3d5-gcp-creds\") on node \"addons-315216\" DevicePath \"\""
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.995829 2446 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nl4r\" (UniqueName: \"kubernetes.io/projected/58047cca-a75a-41df-bc9f-a91dc5a547ca-kube-api-access-9nl4r\") pod \"58047cca-a75a-41df-bc9f-a91dc5a547ca\" (UID: \"58047cca-a75a-41df-bc9f-a91dc5a547ca\") "
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.995893 2446 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xddr8\" (UniqueName: \"kubernetes.io/projected/7dfd72e6-65ac-495b-9832-0e900d22d7e6-kube-api-access-xddr8\") pod \"7dfd72e6-65ac-495b-9832-0e900d22d7e6\" (UID: \"7dfd72e6-65ac-495b-9832-0e900d22d7e6\") "
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.997944 2446 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dfd72e6-65ac-495b-9832-0e900d22d7e6-kube-api-access-xddr8" (OuterVolumeSpecName: "kube-api-access-xddr8") pod "7dfd72e6-65ac-495b-9832-0e900d22d7e6" (UID: "7dfd72e6-65ac-495b-9832-0e900d22d7e6"). InnerVolumeSpecName "kube-api-access-xddr8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 13 18:33:55 addons-315216 kubelet[2446]: I0913 18:33:55.997939 2446 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58047cca-a75a-41df-bc9f-a91dc5a547ca-kube-api-access-9nl4r" (OuterVolumeSpecName: "kube-api-access-9nl4r") pod "58047cca-a75a-41df-bc9f-a91dc5a547ca" (UID: "58047cca-a75a-41df-bc9f-a91dc5a547ca"). InnerVolumeSpecName "kube-api-access-9nl4r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 13 18:33:56 addons-315216 kubelet[2446]: I0913 18:33:56.096483 2446 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-9nl4r\" (UniqueName: \"kubernetes.io/projected/58047cca-a75a-41df-bc9f-a91dc5a547ca-kube-api-access-9nl4r\") on node \"addons-315216\" DevicePath \"\""
Sep 13 18:33:56 addons-315216 kubelet[2446]: I0913 18:33:56.096533 2446 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-xddr8\" (UniqueName: \"kubernetes.io/projected/7dfd72e6-65ac-495b-9832-0e900d22d7e6-kube-api-access-xddr8\") on node \"addons-315216\" DevicePath \"\""
==> storage-provisioner [a9ce8589ddea] <==
I0913 18:21:28.807206 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0913 18:21:28.894610 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0913 18:21:28.894661 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0913 18:21:28.907515 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0913 18:21:28.908782 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"9573cd56-4073-4047-8229-80fa85ec9d93", APIVersion:"v1", ResourceVersion:"619", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-315216_7ed8fcb8-4ad4-4bb5-a9dc-d3c6fa05c16a became leader
I0913 18:21:28.908976 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-315216_7ed8fcb8-4ad4-4bb5-a9dc-d3c6fa05c16a!
I0913 18:21:29.010094 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-315216_7ed8fcb8-4ad4-4bb5-a9dc-d3c6fa05c16a!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-315216 -n addons-315216
helpers_test.go:261: (dbg) Run: kubectl --context addons-315216 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-315216 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-315216 describe pod busybox:
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Service Account: default
Node: addons-315216/192.168.49.2
Start Time: Fri, 13 Sep 2024 18:24:42 +0000
Labels: integration-test=busybox
Annotations: <none>
Status: Pending
IP: 10.244.0.27
IPs:
IP: 10.244.0.27
Containers:
busybox:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
Image ID:
Port: <none>
Host Port: <none>
Command:
sleep
3600
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment:
GOOGLE_APPLICATION_CREDENTIALS: /google-app-creds.json
PROJECT_ID: this_is_fake
GCP_PROJECT: this_is_fake
GCLOUD_PROJECT: this_is_fake
GOOGLE_CLOUD_PROJECT: this_is_fake
CLOUDSDK_CORE_PROJECT: this_is_fake
Mounts:
/google-app-creds.json from gcp-creds (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-xwmf7 (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
kube-api-access-xwmf7:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
gcp-creds:
Type: HostPath (bare host directory volume)
Path: /var/lib/minikube/google_application_credentials.json
HostPathType: File
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  9m15s                  default-scheduler  Successfully assigned default/busybox to addons-315216
  Normal   Pulling    7m46s (x4 over 9m14s)  kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
  Warning  Failed     7m46s (x4 over 9m14s)  kubelet            Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
  Warning  Failed     7m46s (x4 over 9m14s)  kubelet            Error: ErrImagePull
  Warning  Failed     7m31s (x6 over 9m13s)  kubelet            Error: ImagePullBackOff
  Normal   BackOff    4m2s (x21 over 9m13s)  kubelet            Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (72.37s)
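A possible follow-up sketch for the ImagePullBackOff captured in the busybox post-mortem above, assuming the addons-315216 profile is still running; it retries the failing pull directly on the node to see whether the gcr.io "unauthorized: authentication failed" response reproduces outside the kubelet:

  # Diagnostic only: attempt the same pull from inside the minikube node
  out/minikube-linux-amd64 -p addons-315216 ssh -- docker pull gcr.io/k8s-minikube/busybox:1.28.4-glibc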