=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:332: registry stabilized in 3.084123ms
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-6df5h" [4849ea19-88f6-4fbc-ba0f-e290ee2d0d80] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 5.003268843s
addons_test.go:337: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-9tc94" [556af332-2257-4db0-adcb-aca469cf992d] Running
addons_test.go:337: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.0033912s
addons_test.go:342: (dbg) Run: kubectl --context addons-539053 delete po -l run=registry-test --now
addons_test.go:347: (dbg) Run: kubectl --context addons-539053 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:347: (dbg) Non-zero exit: kubectl --context addons-539053 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.075801388s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:349: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-539053 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:353: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:361: (dbg) Run: out/minikube-linux-amd64 -p addons-539053 ip
2024/09/16 17:28:39 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:390: (dbg) Run: out/minikube-linux-amd64 -p addons-539053 addons disable registry --alsologtostderr -v=1
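For reference, the probe that fails above can be reproduced outside the test harness. Below is a minimal standalone sketch of what addons_test.go:347 effectively runs, using os/exec with an explicit timeout; the context name addons-539053, the busybox image, and the wget target are taken from the log above, while the helper name and the 2-minute timeout are illustrative assumptions (the logged command also passes -t, which is dropped here since no TTY is attached).

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// checkRegistryDNS is an illustrative helper (not part of minikube) that runs the
// same in-cluster probe as the test: a one-shot busybox pod issuing
// `wget --spider` against the registry Service DNS name.
func checkRegistryDNS(kubeContext string) error {
	// Assumed timeout for this sketch.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	cmd := exec.CommandContext(ctx, "kubectl",
		"--context", kubeContext,
		"run", "--rm", "registry-test",
		"--restart=Never",
		"--image=gcr.io/k8s-minikube/busybox",
		"-i", "--",
		"sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local")
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("registry probe failed: %v\n%s", err, out)
	}
	fmt.Printf("registry probe output:\n%s", out)
	return nil
}

func main() {
	if err := checkRegistryDNS("addons-539053"); err != nil {
		fmt.Println(err)
	}
}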
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-539053
helpers_test.go:235: (dbg) docker inspect addons-539053:
-- stdout --
[
{
"Id": "889620d9c22cb0f9876805e85238f1130b32e01550a250dffc5408f9e9ce0aa2",
"Created": "2024-09-16T17:15:38.700663122Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 115009,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-16T17:15:38.824683494Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:42cce955f9eac9d57cd22fac71bb25240691d58509ec274149a0acd1eaaf86ec",
"ResolvConfPath": "/var/lib/docker/containers/889620d9c22cb0f9876805e85238f1130b32e01550a250dffc5408f9e9ce0aa2/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/889620d9c22cb0f9876805e85238f1130b32e01550a250dffc5408f9e9ce0aa2/hostname",
"HostsPath": "/var/lib/docker/containers/889620d9c22cb0f9876805e85238f1130b32e01550a250dffc5408f9e9ce0aa2/hosts",
"LogPath": "/var/lib/docker/containers/889620d9c22cb0f9876805e85238f1130b32e01550a250dffc5408f9e9ce0aa2/889620d9c22cb0f9876805e85238f1130b32e01550a250dffc5408f9e9ce0aa2-json.log",
"Name": "/addons-539053",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-539053:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-539053",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/7231bb84623acc78a861812a4b4c8152502c9e4d7dce91310005a19e64f4db52-init/diff:/var/lib/docker/overlay2/2bde1a12356e80260f13e7e04ea75070375a33ab4f42d4cfd7ba26956be5ad81/diff",
"MergedDir": "/var/lib/docker/overlay2/7231bb84623acc78a861812a4b4c8152502c9e4d7dce91310005a19e64f4db52/merged",
"UpperDir": "/var/lib/docker/overlay2/7231bb84623acc78a861812a4b4c8152502c9e4d7dce91310005a19e64f4db52/diff",
"WorkDir": "/var/lib/docker/overlay2/7231bb84623acc78a861812a4b4c8152502c9e4d7dce91310005a19e64f4db52/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-539053",
"Source": "/var/lib/docker/volumes/addons-539053/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-539053",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-539053",
"name.minikube.sigs.k8s.io": "addons-539053",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "5a3a40c5aa697f6cc4a86dbd6597cd54826a35038523171bb5932c1f033a7d1b",
"SandboxKey": "/var/run/docker/netns/5a3a40c5aa69",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-539053": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "c1f2d25a0fa41d9a7ff6664261042de1aa064ad0e68b1c0e696a283eb2fe3d1a",
"EndpointID": "8338c295eda910bb9c27a3e697e58ecbf98822a66eebdefbea4b53970f1b808b",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-539053",
"889620d9c22c"
]
}
}
}
}
]
-- /stdout --
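The Ports section above shows the registry's 5000/tcp published on 127.0.0.1:32770, while the earlier debug GET goes to 192.168.49.2:5000 directly. As an illustrative aside (not part of the test suite), a small Go helper can read that mapping the same way later log lines do, via a docker inspect Go template; the container name comes from this log, everything else is an assumption.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// hostPortFor shells out to `docker inspect` with a Go template and returns the
// host port that the given container port (e.g. "5000/tcp") is published on.
func hostPortFor(container, containerPort string) (string, error) {
	tmpl := fmt.Sprintf("{{(index (index .NetworkSettings.Ports %q) 0).HostPort}}", containerPort)
	out, err := exec.Command("docker", "inspect", "-f", tmpl, container).Output()
	if err != nil {
		return "", fmt.Errorf("docker inspect %s: %w", container, err)
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	// Against the state captured above this would print 32770.
	port, err := hostPortFor("addons-539053", "5000/tcp")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("registry published on 127.0.0.1:" + port)
}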
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-539053 -n addons-539053
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-539053 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | --download-only -p | download-docker-294705 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | |
| | download-docker-294705 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p download-docker-294705 | download-docker-294705 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | 16 Sep 24 17:15 UTC |
| start | --download-only -p | binary-mirror-149473 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | |
| | binary-mirror-149473 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:35485 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-149473 | binary-mirror-149473 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | 16 Sep 24 17:15 UTC |
| addons | disable dashboard -p | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | |
| | addons-539053 | | | | | |
| addons | enable dashboard -p | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | |
| | addons-539053 | | | | | |
| start | -p addons-539053 --wait=true | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:15 UTC | 16 Sep 24 17:18 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:19 UTC | 16 Sep 24 17:19 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-539053 addons | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:27 UTC | 16 Sep 24 17:27 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-539053 ssh cat | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:27 UTC | 16 Sep 24 17:27 UTC |
| | /opt/local-path-provisioner/pvc-1389ca84-3e21-4c35-b54d-991231b2f504_default_test-pvc/file1 | | | | | |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:27 UTC | 16 Sep 24 17:28 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:27 UTC | 16 Sep 24 17:27 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:27 UTC | 16 Sep 24 17:27 UTC |
| | addons-539053 | | | | | |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:27 UTC | 16 Sep 24 17:28 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | -p addons-539053 | | | | | |
| addons | disable inspektor-gadget -p | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | addons-539053 | | | | | |
| addons | addons-539053 addons | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | -p addons-539053 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-539053 addons | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-539053 ssh curl -s | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-539053 ip | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | |
| | ingress --alsologtostderr -v=1 | | | | | |
| ip | addons-539053 ip | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| addons | addons-539053 addons disable | addons-539053 | jenkins | v1.34.0 | 16 Sep 24 17:28 UTC | 16 Sep 24 17:28 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/16 17:15:17
Running on machine: ubuntu-20-agent-5
Binary: Built with gc go1.23.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0916 17:15:17.090766 114275 out.go:345] Setting OutFile to fd 1 ...
I0916 17:15:17.090996 114275 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0916 17:15:17.091004 114275 out.go:358] Setting ErrFile to fd 2...
I0916 17:15:17.091009 114275 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0916 17:15:17.091164 114275 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19649-105988/.minikube/bin
I0916 17:15:17.091733 114275 out.go:352] Setting JSON to false
I0916 17:15:17.092573 114275 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-5","uptime":3457,"bootTime":1726503460,"procs":172,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1068-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0916 17:15:17.092667 114275 start.go:139] virtualization: kvm guest
I0916 17:15:17.094545 114275 out.go:177] * [addons-539053] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0916 17:15:17.095682 114275 out.go:177] - MINIKUBE_LOCATION=19649
I0916 17:15:17.095695 114275 notify.go:220] Checking for updates...
I0916 17:15:17.098006 114275 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0916 17:15:17.099226 114275 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19649-105988/kubeconfig
I0916 17:15:17.100384 114275 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19649-105988/.minikube
I0916 17:15:17.101644 114275 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0916 17:15:17.102862 114275 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0916 17:15:17.104133 114275 driver.go:394] Setting default libvirt URI to qemu:///system
I0916 17:15:17.124455 114275 docker.go:123] docker version: linux-27.2.1:Docker Engine - Community
I0916 17:15:17.124588 114275 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0916 17:15:17.170896 114275 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:46 SystemTime:2024-09-16 17:15:17.162568553 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1068-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647943680 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-5 Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0916 17:15:17.171009 114275 docker.go:318] overlay module found
I0916 17:15:17.172908 114275 out.go:177] * Using the docker driver based on user configuration
I0916 17:15:17.174290 114275 start.go:297] selected driver: docker
I0916 17:15:17.174305 114275 start.go:901] validating driver "docker" against <nil>
I0916 17:15:17.174316 114275 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0916 17:15:17.175054 114275 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0916 17:15:17.219084 114275 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:46 SystemTime:2024-09-16 17:15:17.210650968 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1068-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647943680 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-5 Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0916 17:15:17.219289 114275 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0916 17:15:17.219517 114275 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0916 17:15:17.221316 114275 out.go:177] * Using Docker driver with root privileges
I0916 17:15:17.222628 114275 cni.go:84] Creating CNI manager for ""
I0916 17:15:17.222687 114275 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0916 17:15:17.222700 114275 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0916 17:15:17.222766 114275 start.go:340] cluster config:
{Name:addons-539053 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-539053 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 17:15:17.224103 114275 out.go:177] * Starting "addons-539053" primary control-plane node in "addons-539053" cluster
I0916 17:15:17.225181 114275 cache.go:121] Beginning downloading kic base image for docker with docker
I0916 17:15:17.226408 114275 out.go:177] * Pulling base image v0.0.45-1726481311-19649 ...
I0916 17:15:17.227644 114275 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0916 17:15:17.227668 114275 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc in local docker daemon
I0916 17:15:17.227678 114275 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19649-105988/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0916 17:15:17.227685 114275 cache.go:56] Caching tarball of preloaded images
I0916 17:15:17.227772 114275 preload.go:172] Found /home/jenkins/minikube-integration/19649-105988/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0916 17:15:17.227784 114275 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0916 17:15:17.228171 114275 profile.go:143] Saving config to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/config.json ...
I0916 17:15:17.228198 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/config.json: {Name:mk2bb66488164ef9dcd50e32bedebb588655529e Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:17.242358 114275 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc to local cache
I0916 17:15:17.242460 114275 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc in local cache directory
I0916 17:15:17.242473 114275 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc in local cache directory, skipping pull
I0916 17:15:17.242477 114275 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc exists in cache, skipping pull
I0916 17:15:17.242484 114275 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc as a tarball
I0916 17:15:17.242491 114275 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc from local cache
I0916 17:15:29.445316 114275 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc from cached tarball
I0916 17:15:29.445357 114275 cache.go:194] Successfully downloaded all kic artifacts
I0916 17:15:29.445402 114275 start.go:360] acquireMachinesLock for addons-539053: {Name:mk0043d3a6bacbded59cc72569f5719de0510390 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0916 17:15:29.445511 114275 start.go:364] duration metric: took 87.72µs to acquireMachinesLock for "addons-539053"
I0916 17:15:29.445569 114275 start.go:93] Provisioning new machine with config: &{Name:addons-539053 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-539053 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 17:15:29.445675 114275 start.go:125] createHost starting for "" (driver="docker")
I0916 17:15:29.447439 114275 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0916 17:15:29.447714 114275 start.go:159] libmachine.API.Create for "addons-539053" (driver="docker")
I0916 17:15:29.447752 114275 client.go:168] LocalClient.Create starting
I0916 17:15:29.447844 114275 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca.pem
I0916 17:15:29.655776 114275 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/cert.pem
I0916 17:15:29.877092 114275 cli_runner.go:164] Run: docker network inspect addons-539053 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0916 17:15:29.892920 114275 cli_runner.go:211] docker network inspect addons-539053 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0916 17:15:29.893012 114275 network_create.go:284] running [docker network inspect addons-539053] to gather additional debugging logs...
I0916 17:15:29.893037 114275 cli_runner.go:164] Run: docker network inspect addons-539053
W0916 17:15:29.907959 114275 cli_runner.go:211] docker network inspect addons-539053 returned with exit code 1
I0916 17:15:29.907995 114275 network_create.go:287] error running [docker network inspect addons-539053]: docker network inspect addons-539053: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-539053 not found
I0916 17:15:29.908013 114275 network_create.go:289] output of [docker network inspect addons-539053]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-539053 not found
** /stderr **
I0916 17:15:29.908114 114275 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 17:15:29.924362 114275 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00050aa40}
I0916 17:15:29.924427 114275 network_create.go:124] attempt to create docker network addons-539053 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0916 17:15:29.924489 114275 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-539053 addons-539053
I0916 17:15:29.984627 114275 network_create.go:108] docker network addons-539053 192.168.49.0/24 created
I0916 17:15:29.984668 114275 kic.go:121] calculated static IP "192.168.49.2" for the "addons-539053" container
I0916 17:15:29.984738 114275 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0916 17:15:29.999220 114275 cli_runner.go:164] Run: docker volume create addons-539053 --label name.minikube.sigs.k8s.io=addons-539053 --label created_by.minikube.sigs.k8s.io=true
I0916 17:15:30.015617 114275 oci.go:103] Successfully created a docker volume addons-539053
I0916 17:15:30.015684 114275 cli_runner.go:164] Run: docker run --rm --name addons-539053-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-539053 --entrypoint /usr/bin/test -v addons-539053:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc -d /var/lib
I0916 17:15:34.708232 114275 cli_runner.go:217] Completed: docker run --rm --name addons-539053-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-539053 --entrypoint /usr/bin/test -v addons-539053:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc -d /var/lib: (4.692499232s)
I0916 17:15:34.708261 114275 oci.go:107] Successfully prepared a docker volume addons-539053
I0916 17:15:34.708287 114275 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0916 17:15:34.708316 114275 kic.go:194] Starting extracting preloaded images to volume ...
I0916 17:15:34.708382 114275 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19649-105988/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-539053:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc -I lz4 -xf /preloaded.tar -C /extractDir
I0916 17:15:38.643736 114275 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19649-105988/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-539053:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc -I lz4 -xf /preloaded.tar -C /extractDir: (3.935300228s)
I0916 17:15:38.643778 114275 kic.go:203] duration metric: took 3.935457007s to extract preloaded images to volume ...
W0916 17:15:38.643930 114275 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0916 17:15:38.644070 114275 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0916 17:15:38.686917 114275 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-539053 --name addons-539053 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-539053 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-539053 --network addons-539053 --ip 192.168.49.2 --volume addons-539053:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc
I0916 17:15:38.991613 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Running}}
I0916 17:15:39.008107 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:39.024963 114275 cli_runner.go:164] Run: docker exec addons-539053 stat /var/lib/dpkg/alternatives/iptables
I0916 17:15:39.065071 114275 oci.go:144] the created container "addons-539053" has a running status.
I0916 17:15:39.065110 114275 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa...
I0916 17:15:39.249516 114275 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0916 17:15:39.271435 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:39.289444 114275 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0916 17:15:39.289467 114275 kic_runner.go:114] Args: [docker exec --privileged addons-539053 chown docker:docker /home/docker/.ssh/authorized_keys]
I0916 17:15:39.362118 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:39.380718 114275 machine.go:93] provisionDockerMachine start ...
I0916 17:15:39.380814 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:39.398104 114275 main.go:141] libmachine: Using SSH client type: native
I0916 17:15:39.398318 114275 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0916 17:15:39.398332 114275 main.go:141] libmachine: About to run SSH command:
hostname
I0916 17:15:39.593218 114275 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-539053
I0916 17:15:39.593253 114275 ubuntu.go:169] provisioning hostname "addons-539053"
I0916 17:15:39.593328 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:39.610327 114275 main.go:141] libmachine: Using SSH client type: native
I0916 17:15:39.610544 114275 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0916 17:15:39.610561 114275 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-539053 && echo "addons-539053" | sudo tee /etc/hostname
I0916 17:15:39.739666 114275 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-539053
I0916 17:15:39.739736 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:39.755442 114275 main.go:141] libmachine: Using SSH client type: native
I0916 17:15:39.755614 114275 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0916 17:15:39.755631 114275 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-539053' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-539053/g' /etc/hosts;
else
echo '127.0.1.1 addons-539053' | sudo tee -a /etc/hosts;
fi
fi
I0916 17:15:39.873921 114275 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0916 17:15:39.873951 114275 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19649-105988/.minikube CaCertPath:/home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19649-105988/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19649-105988/.minikube}
I0916 17:15:39.874010 114275 ubuntu.go:177] setting up certificates
I0916 17:15:39.874022 114275 provision.go:84] configureAuth start
I0916 17:15:39.874109 114275 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-539053
I0916 17:15:39.889215 114275 provision.go:143] copyHostCerts
I0916 17:15:39.889291 114275 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19649-105988/.minikube/cert.pem (1123 bytes)
I0916 17:15:39.889407 114275 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19649-105988/.minikube/key.pem (1675 bytes)
I0916 17:15:39.889476 114275 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19649-105988/.minikube/ca.pem (1078 bytes)
I0916 17:15:39.889538 114275 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19649-105988/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca-key.pem org=jenkins.addons-539053 san=[127.0.0.1 192.168.49.2 addons-539053 localhost minikube]
I0916 17:15:40.095398 114275 provision.go:177] copyRemoteCerts
I0916 17:15:40.095478 114275 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0916 17:15:40.095523 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:40.111120 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:40.198037 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0916 17:15:40.218857 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0916 17:15:40.238955 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0916 17:15:40.259024 114275 provision.go:87] duration metric: took 384.985147ms to configureAuth
I0916 17:15:40.259053 114275 ubuntu.go:193] setting minikube options for container-runtime
I0916 17:15:40.259222 114275 config.go:182] Loaded profile config "addons-539053": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0916 17:15:40.259317 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:40.275037 114275 main.go:141] libmachine: Using SSH client type: native
I0916 17:15:40.275201 114275 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0916 17:15:40.275213 114275 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0916 17:15:40.394040 114275 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0916 17:15:40.394079 114275 ubuntu.go:71] root file system type: overlay
I0916 17:15:40.394197 114275 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0916 17:15:40.394252 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:40.409896 114275 main.go:141] libmachine: Using SSH client type: native
I0916 17:15:40.410088 114275 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0916 17:15:40.410149 114275 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0916 17:15:40.540066 114275 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0916 17:15:40.540164 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:40.556522 114275 main.go:141] libmachine: Using SSH client type: native
I0916 17:15:40.556696 114275 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0916 17:15:40.556717 114275 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0916 17:15:41.234799 114275 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-06 12:06:41.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-16 17:15:40.536278613 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0916 17:15:41.234831 114275 machine.go:96] duration metric: took 1.854093128s to provisionDockerMachine
I0916 17:15:41.234843 114275 client.go:171] duration metric: took 11.787082498s to LocalClient.Create
I0916 17:15:41.234858 114275 start.go:167] duration metric: took 11.787148132s to libmachine.API.Create "addons-539053"
I0916 17:15:41.234866 114275 start.go:293] postStartSetup for "addons-539053" (driver="docker")
I0916 17:15:41.234879 114275 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0916 17:15:41.234948 114275 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0916 17:15:41.235003 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:41.250233 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:41.338422 114275 ssh_runner.go:195] Run: cat /etc/os-release
I0916 17:15:41.341234 114275 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0916 17:15:41.341265 114275 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0916 17:15:41.341272 114275 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0916 17:15:41.341279 114275 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0916 17:15:41.341289 114275 filesync.go:126] Scanning /home/jenkins/minikube-integration/19649-105988/.minikube/addons for local assets ...
I0916 17:15:41.341344 114275 filesync.go:126] Scanning /home/jenkins/minikube-integration/19649-105988/.minikube/files for local assets ...
I0916 17:15:41.341366 114275 start.go:296] duration metric: took 106.493761ms for postStartSetup
I0916 17:15:41.341620 114275 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-539053
I0916 17:15:41.357158 114275 profile.go:143] Saving config to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/config.json ...
I0916 17:15:41.357388 114275 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0916 17:15:41.357426 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:41.373021 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:41.458463 114275 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0916 17:15:41.462248 114275 start.go:128] duration metric: took 12.016554424s to createHost
I0916 17:15:41.462276 114275 start.go:83] releasing machines lock for "addons-539053", held for 12.016750633s
I0916 17:15:41.462345 114275 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-539053
I0916 17:15:41.478248 114275 ssh_runner.go:195] Run: cat /version.json
I0916 17:15:41.478265 114275 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0916 17:15:41.478311 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:41.478338 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:41.494965 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:41.496160 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:41.581676 114275 ssh_runner.go:195] Run: systemctl --version
I0916 17:15:41.654494 114275 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0916 17:15:41.658667 114275 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0916 17:15:41.680057 114275 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0916 17:15:41.680126 114275 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0916 17:15:41.703644 114275 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
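The two find/sed invocations above patch the loopback CNI config (adding a "name" field and pinning cniVersion to 1.0.0) and park any bridge/podman configs under a .mk_disabled suffix. A rough Go equivalent of just the loopback patch, with the config path chosen for illustration only:
package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// patchLoopbackConf mirrors the sed-based patch above: ensure the loopback
// CNI config has a "name" field and pin its cniVersion to 1.0.0.
func patchLoopbackConf(path string) error {
    raw, err := os.ReadFile(path)
    if err != nil {
        return err
    }
    conf := map[string]any{}
    if err := json.Unmarshal(raw, &conf); err != nil {
        return err
    }
    if _, ok := conf["name"]; !ok {
        conf["name"] = "loopback"
    }
    conf["cniVersion"] = "1.0.0"
    out, err := json.MarshalIndent(conf, "", "  ")
    if err != nil {
        return err
    }
    return os.WriteFile(path, out, 0o644)
}

func main() {
    if err := patchLoopbackConf("/etc/cni/net.d/200-loopback.conf"); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}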
I0916 17:15:41.703674 114275 start.go:495] detecting cgroup driver to use...
I0916 17:15:41.703709 114275 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0916 17:15:41.703849 114275 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 17:15:41.717531 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0916 17:15:41.725781 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0916 17:15:41.734006 114275 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0916 17:15:41.734060 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0916 17:15:41.742226 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 17:15:41.750301 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0916 17:15:41.758277 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0916 17:15:41.766987 114275 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0916 17:15:41.774622 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0916 17:15:41.782684 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0916 17:15:41.791199 114275 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0916 17:15:41.799345 114275 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0916 17:15:41.806310 114275 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0916 17:15:41.813584 114275 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 17:15:41.885354 114275 ssh_runner.go:195] Run: sudo systemctl restart containerd
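The sed edits above align containerd with the "cgroupfs" driver detected on the host, chiefly by forcing SystemdCgroup = false in /etc/containerd/config.toml before the daemon restart. A small Go sketch of the same toggle using a regular expression instead of sed; setSystemdCgroup is an illustrative name:
package main

import (
    "fmt"
    "os"
    "regexp"
)

// setSystemdCgroup flips the SystemdCgroup toggle in a containerd config.toml,
// matching the sed expression used above while preserving indentation.
func setSystemdCgroup(path string, enabled bool) error {
    data, err := os.ReadFile(path)
    if err != nil {
        return err
    }
    re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
    patched := re.ReplaceAll(data, []byte(fmt.Sprintf("${1}SystemdCgroup = %t", enabled)))
    return os.WriteFile(path, patched, 0o644)
}

func main() {
    // cgroupfs was detected on the host, so containerd must not use the systemd cgroup driver.
    if err := setSystemdCgroup("/etc/containerd/config.toml", false); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}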
I0916 17:15:41.960023 114275 start.go:495] detecting cgroup driver to use...
I0916 17:15:41.960067 114275 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0916 17:15:41.960117 114275 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0916 17:15:41.971072 114275 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0916 17:15:41.971146 114275 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0916 17:15:41.981404 114275 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0916 17:15:41.995814 114275 ssh_runner.go:195] Run: which cri-dockerd
I0916 17:15:41.999199 114275 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0916 17:15:42.007331 114275 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0916 17:15:42.023272 114275 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0916 17:15:42.112084 114275 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0916 17:15:42.198006 114275 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0916 17:15:42.198168 114275 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0916 17:15:42.214915 114275 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 17:15:42.291451 114275 ssh_runner.go:195] Run: sudo systemctl restart docker
I0916 17:15:42.531066 114275 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0916 17:15:42.541254 114275 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 17:15:42.551521 114275 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0916 17:15:42.625587 114275 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0916 17:15:42.701692 114275 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 17:15:42.782106 114275 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0916 17:15:42.793767 114275 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0916 17:15:42.803012 114275 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 17:15:42.882179 114275 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0916 17:15:42.938157 114275 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0916 17:15:42.938255 114275 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0916 17:15:42.941880 114275 start.go:563] Will wait 60s for crictl version
I0916 17:15:42.941924 114275 ssh_runner.go:195] Run: which crictl
I0916 17:15:42.945244 114275 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0916 17:15:42.976942 114275 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.2.1
RuntimeApiVersion: v1
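"Will wait 60s for socket path /var/run/cri-dockerd.sock" and "Will wait 60s for crictl version" above are simple bounded polls. A sketch of the socket wait under that assumption; the poll interval and function name are mine, not minikube's:
package main

import (
    "fmt"
    "os"
    "time"
)

// waitForSocket polls until the CRI socket shows up on disk or the deadline
// passes, mirroring the "Will wait 60s for socket path" step above.
func waitForSocket(path string, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if _, err := os.Stat(path); err == nil {
            return nil
        }
        time.Sleep(500 * time.Millisecond)
    }
    return fmt.Errorf("timed out after %s waiting for %s", timeout, path)
}

func main() {
    if err := waitForSocket("/var/run/cri-dockerd.sock", 60*time.Second); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}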
I0916 17:15:42.977011 114275 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 17:15:42.998198 114275 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0916 17:15:43.021935 114275 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.2.1 ...
I0916 17:15:43.022007 114275 cli_runner.go:164] Run: docker network inspect addons-539053 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0916 17:15:43.036769 114275 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0916 17:15:43.040009 114275 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 17:15:43.049524 114275 kubeadm.go:883] updating cluster {Name:addons-539053 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-539053 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0916 17:15:43.049633 114275 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0916 17:15:43.049679 114275 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0916 17:15:43.067154 114275 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0916 17:15:43.067174 114275 docker.go:615] Images already preloaded, skipping extraction
I0916 17:15:43.067253 114275 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0916 17:15:43.085531 114275 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0916 17:15:43.085560 114275 cache_images.go:84] Images are preloaded, skipping loading
I0916 17:15:43.085573 114275 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0916 17:15:43.085675 114275 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-539053 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-539053 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
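The kubelet drop-in above is rendered from a template with the node-specific values (Kubernetes version, hostname override, node IP) filled in. A cut-down Go sketch of rendering such a unit; the template text and field names here are illustrative rather than minikube's actual template:
package main

import (
    "os"
    "text/template"
)

// A shortened unit in the spirit of the kubelet drop-in printed above.
const kubeletUnit = `[Unit]
Wants=docker.socket

[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/{{.KubernetesVersion}}/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override={{.NodeName}} --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip={{.NodeIP}}

[Install]
`

func main() {
    tmpl := template.Must(template.New("kubelet").Parse(kubeletUnit))
    _ = tmpl.Execute(os.Stdout, struct {
        KubernetesVersion, NodeName, NodeIP string
    }{"v1.31.1", "addons-539053", "192.168.49.2"})
}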
I0916 17:15:43.085731 114275 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0916 17:15:43.128103 114275 cni.go:84] Creating CNI manager for ""
I0916 17:15:43.128131 114275 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0916 17:15:43.128144 114275 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0916 17:15:43.128161 114275 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-539053 NodeName:addons-539053 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0916 17:15:43.128286 114275 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "addons-539053"
  kubeletExtraArgs:
    node-ip: 192.168.49.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
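The rendered kubeadm config above is a four-document YAML stream (InitConfiguration, ClusterConfiguration, KubeletConfiguration, KubeProxyConfiguration) that is shipped to the node as kubeadm.yaml below. A quick Go sketch that splits the stream on the document separator and lists each kind; the path matches the one used later in this log and the check itself is only illustrative:
package main

import (
    "fmt"
    "os"
    "strings"
)

// Split the multi-document YAML and report each document's kind,
// a cheap sanity check before handing the file to kubeadm.
func main() {
    data, err := os.ReadFile("/var/tmp/minikube/kubeadm.yaml")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    for _, doc := range strings.Split(string(data), "\n---\n") {
        for _, line := range strings.Split(doc, "\n") {
            if strings.HasPrefix(line, "kind: ") {
                fmt.Println(strings.TrimPrefix(line, "kind: "))
            }
        }
    }
}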
I0916 17:15:43.128340 114275 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0916 17:15:43.136325 114275 binaries.go:44] Found k8s binaries, skipping transfer
I0916 17:15:43.136385 114275 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0916 17:15:43.144805 114275 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0916 17:15:43.160973 114275 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0916 17:15:43.176775 114275 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0916 17:15:43.191892 114275 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0916 17:15:43.194808 114275 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0916 17:15:43.204082 114275 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 17:15:43.279024 114275 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 17:15:43.290750 114275 certs.go:68] Setting up /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053 for IP: 192.168.49.2
I0916 17:15:43.290784 114275 certs.go:194] generating shared ca certs ...
I0916 17:15:43.290808 114275 certs.go:226] acquiring lock for ca certs: {Name:mk8d7403e6a7d2260afa4bf6d78cd24d9849ff20 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.290932 114275 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19649-105988/.minikube/ca.key
I0916 17:15:43.455574 114275 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19649-105988/.minikube/ca.crt ...
I0916 17:15:43.455603 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/ca.crt: {Name:mk5421114da3e7f83dc89907491e68c2f01dfa63 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.455767 114275 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19649-105988/.minikube/ca.key ...
I0916 17:15:43.455779 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/ca.key: {Name:mkdae9af3ae3b70f7f6d4ebd123324c2137abdc2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.455845 114275 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.key
I0916 17:15:43.533984 114275 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.crt ...
I0916 17:15:43.534013 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.crt: {Name:mk5c12e1021b71df2d793aadf8552022c180ce5a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.534204 114275 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.key ...
I0916 17:15:43.534222 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.key: {Name:mk56ead1198144698658ff79b33e0e7c5d5c340d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.534290 114275 certs.go:256] generating profile certs ...
I0916 17:15:43.534344 114275 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/client.key
I0916 17:15:43.534365 114275 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/client.crt with IP's: []
I0916 17:15:43.667870 114275 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/client.crt ...
I0916 17:15:43.667903 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/client.crt: {Name:mkef3e8be6dbea07877b7cbc06795313c4b44cb4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.668067 114275 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/client.key ...
I0916 17:15:43.668079 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/client.key: {Name:mkd134fec679436d855d1dbaa0ff9a6b3557b4d7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.668147 114275 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.key.54337ae5
I0916 17:15:43.668165 114275 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.crt.54337ae5 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0916 17:15:43.750084 114275 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.crt.54337ae5 ...
I0916 17:15:43.750114 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.crt.54337ae5: {Name:mka65a38a6c0b04201361159c9235ae7d0d926fd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.750269 114275 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.key.54337ae5 ...
I0916 17:15:43.750284 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.key.54337ae5: {Name:mk8f6e3b9f7146e29b844a2b2482fd0196539177 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.750351 114275 certs.go:381] copying /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.crt.54337ae5 -> /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.crt
I0916 17:15:43.750428 114275 certs.go:385] copying /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.key.54337ae5 -> /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.key
I0916 17:15:43.750472 114275 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.key
I0916 17:15:43.750492 114275 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.crt with IP's: []
I0916 17:15:43.930096 114275 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.crt ...
I0916 17:15:43.930129 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.crt: {Name:mk656ff76055d13f8e411a076941f52e4cc3cdba Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.930318 114275 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.key ...
I0916 17:15:43.930336 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.key: {Name:mka75d9bfe13d475c775e973b891986cf6d1c9a8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:43.930532 114275 certs.go:484] found cert: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca-key.pem (1679 bytes)
I0916 17:15:43.930572 114275 certs.go:484] found cert: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/ca.pem (1078 bytes)
I0916 17:15:43.930593 114275 certs.go:484] found cert: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/cert.pem (1123 bytes)
I0916 17:15:43.930613 114275 certs.go:484] found cert: /home/jenkins/minikube-integration/19649-105988/.minikube/certs/key.pem (1675 bytes)
I0916 17:15:43.931250 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0916 17:15:43.952700 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0916 17:15:43.972419 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0916 17:15:43.992075 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0916 17:15:44.012068 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0916 17:15:44.031879 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0916 17:15:44.051587 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0916 17:15:44.071246 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/profiles/addons-539053/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0916 17:15:44.090512 114275 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19649-105988/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0916 17:15:44.113504 114275 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0916 17:15:44.130398 114275 ssh_runner.go:195] Run: openssl version
I0916 17:15:44.135421 114275 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0916 17:15:44.143645 114275 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0916 17:15:44.147073 114275 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 16 17:15 /usr/share/ca-certificates/minikubeCA.pem
I0916 17:15:44.147126 114275 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0916 17:15:44.153040 114275 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
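The openssl x509 -hash call and the b5213941.0 symlink above are the standard way to publish a CA under /etc/ssl/certs: the link name is the certificate's subject hash plus a .0 suffix. A Go sketch of both steps; linkCACert is an illustrative name, not a minikube function:
package main

import (
    "fmt"
    "os"
    "os/exec"
    "strings"
)

// linkCACert reproduces the two steps above: ask openssl for the certificate's
// subject hash, then expose the PEM under /etc/ssl/certs/<hash>.0 where TLS
// libraries expect to find trusted CAs.
func linkCACert(pem string) error {
    out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", pem).Output()
    if err != nil {
        return err
    }
    hash := strings.TrimSpace(string(out))
    link := fmt.Sprintf("/etc/ssl/certs/%s.0", hash)
    // tolerate an existing link so the step stays idempotent across restarts
    if err := os.Symlink(pem, link); err != nil && !os.IsExist(err) {
        return err
    }
    return nil
}

func main() {
    if err := linkCACert("/usr/share/ca-certificates/minikubeCA.pem"); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}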
I0916 17:15:44.160928 114275 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0916 17:15:44.163747 114275 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0916 17:15:44.163796 114275 kubeadm.go:392] StartCluster: {Name:addons-539053 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726481311-19649@sha256:b5dfdcf7ad8b49233db09f1c58aaf52f6522fde64cf16c939b3fc45365d24cdc Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-539053 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0916 17:15:44.163888 114275 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0916 17:15:44.180442 114275 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0916 17:15:44.187988 114275 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0916 17:15:44.195441 114275 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0916 17:15:44.195486 114275 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0916 17:15:44.202724 114275 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0916 17:15:44.202744 114275 kubeadm.go:157] found existing configuration files:
I0916 17:15:44.202777 114275 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0916 17:15:44.209868 114275 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0916 17:15:44.209925 114275 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0916 17:15:44.217008 114275 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0916 17:15:44.224198 114275 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0916 17:15:44.224244 114275 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0916 17:15:44.231146 114275 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0916 17:15:44.238037 114275 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0916 17:15:44.238103 114275 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0916 17:15:44.244957 114275 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0916 17:15:44.252070 114275 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0916 17:15:44.252105 114275 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
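The loop above treats each kubeconfig as stale unless it already mentions https://control-plane.minikube.internal:8443, deleting the ones that do not so kubeadm can regenerate them. A compact Go sketch of that check-and-remove pass, under the same file list and endpoint shown in the log:
package main

import (
    "fmt"
    "os"
    "strings"
)

// For each existing config, keep it only if it already points at the expected
// control-plane endpoint; otherwise remove it so kubeadm writes a fresh one.
func main() {
    endpoint := "https://control-plane.minikube.internal:8443"
    files := []string{
        "/etc/kubernetes/admin.conf",
        "/etc/kubernetes/kubelet.conf",
        "/etc/kubernetes/controller-manager.conf",
        "/etc/kubernetes/scheduler.conf",
    }
    for _, f := range files {
        data, err := os.ReadFile(f)
        if err != nil || strings.Contains(string(data), endpoint) {
            continue // missing file or already correct: nothing to clean up
        }
        if err := os.Remove(f); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }
}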
I0916 17:15:44.258968 114275 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0916 17:15:44.292365 114275 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0916 17:15:44.292450 114275 kubeadm.go:310] [preflight] Running pre-flight checks
I0916 17:15:44.312218 114275 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0916 17:15:44.312295 114275 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1068-gcp
I0916 17:15:44.312379 114275 kubeadm.go:310] OS: Linux
I0916 17:15:44.312463 114275 kubeadm.go:310] CGROUPS_CPU: enabled
I0916 17:15:44.312613 114275 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0916 17:15:44.312686 114275 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0916 17:15:44.312741 114275 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0916 17:15:44.312781 114275 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0916 17:15:44.312831 114275 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0916 17:15:44.312875 114275 kubeadm.go:310] CGROUPS_PIDS: enabled
I0916 17:15:44.312913 114275 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0916 17:15:44.312952 114275 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0916 17:15:44.359117 114275 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0916 17:15:44.359257 114275 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0916 17:15:44.359402 114275 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0916 17:15:44.369095 114275 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0916 17:15:44.372519 114275 out.go:235] - Generating certificates and keys ...
I0916 17:15:44.372618 114275 kubeadm.go:310] [certs] Using existing ca certificate authority
I0916 17:15:44.372683 114275 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0916 17:15:44.634719 114275 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0916 17:15:44.772291 114275 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0916 17:15:44.878962 114275 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0916 17:15:45.081025 114275 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0916 17:15:45.326768 114275 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0916 17:15:45.326916 114275 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-539053 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0916 17:15:45.398207 114275 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0916 17:15:45.398361 114275 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-539053 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0916 17:15:45.554448 114275 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0916 17:15:45.677749 114275 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0916 17:15:46.159478 114275 kubeadm.go:310] [certs] Generating "sa" key and public key
I0916 17:15:46.159566 114275 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0916 17:15:46.270056 114275 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0916 17:15:46.443872 114275 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0916 17:15:46.609834 114275 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0916 17:15:46.940547 114275 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0916 17:15:47.045487 114275 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0916 17:15:47.045918 114275 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0916 17:15:47.048354 114275 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0916 17:15:47.050458 114275 out.go:235] - Booting up control plane ...
I0916 17:15:47.050587 114275 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0916 17:15:47.050713 114275 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0916 17:15:47.050821 114275 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0916 17:15:47.063109 114275 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0916 17:15:47.067948 114275 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0916 17:15:47.067999 114275 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0916 17:15:47.148566 114275 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0916 17:15:47.148695 114275 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0916 17:15:47.650026 114275 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.491673ms
I0916 17:15:47.650155 114275 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0916 17:15:52.651336 114275 kubeadm.go:310] [api-check] The API server is healthy after 5.001297149s
I0916 17:15:52.661564 114275 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0916 17:15:52.670975 114275 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0916 17:15:52.685748 114275 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0916 17:15:52.685954 114275 kubeadm.go:310] [mark-control-plane] Marking the node addons-539053 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0916 17:15:52.693597 114275 kubeadm.go:310] [bootstrap-token] Using token: wkr91p.76u2qy72zpjh3bdw
I0916 17:15:52.694945 114275 out.go:235] - Configuring RBAC rules ...
I0916 17:15:52.695086 114275 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0916 17:15:52.697509 114275 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0916 17:15:52.703277 114275 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0916 17:15:52.705339 114275 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0916 17:15:52.707417 114275 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0916 17:15:52.709383 114275 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0916 17:15:53.056713 114275 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0916 17:15:53.473315 114275 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0916 17:15:54.056470 114275 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0916 17:15:54.057228 114275 kubeadm.go:310]
I0916 17:15:54.057327 114275 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0916 17:15:54.057338 114275 kubeadm.go:310]
I0916 17:15:54.057449 114275 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0916 17:15:54.057459 114275 kubeadm.go:310]
I0916 17:15:54.057492 114275 kubeadm.go:310] mkdir -p $HOME/.kube
I0916 17:15:54.057572 114275 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0916 17:15:54.057650 114275 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0916 17:15:54.057658 114275 kubeadm.go:310]
I0916 17:15:54.057733 114275 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0916 17:15:54.057742 114275 kubeadm.go:310]
I0916 17:15:54.057827 114275 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0916 17:15:54.057846 114275 kubeadm.go:310]
I0916 17:15:54.057915 114275 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0916 17:15:54.058023 114275 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0916 17:15:54.058153 114275 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0916 17:15:54.058165 114275 kubeadm.go:310]
I0916 17:15:54.058262 114275 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0916 17:15:54.058376 114275 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0916 17:15:54.058388 114275 kubeadm.go:310]
I0916 17:15:54.058527 114275 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token wkr91p.76u2qy72zpjh3bdw \
I0916 17:15:54.058657 114275 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:589aeae3f954a4cf51a4ace60c19de975422082f6bd32e26d54f799babcca0a2 \
I0916 17:15:54.058688 114275 kubeadm.go:310] --control-plane
I0916 17:15:54.058699 114275 kubeadm.go:310]
I0916 17:15:54.058794 114275 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0916 17:15:54.058800 114275 kubeadm.go:310]
I0916 17:15:54.058918 114275 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token wkr91p.76u2qy72zpjh3bdw \
I0916 17:15:54.059069 114275 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:589aeae3f954a4cf51a4ace60c19de975422082f6bd32e26d54f799babcca0a2
I0916 17:15:54.061013 114275 kubeadm.go:310] W0916 17:15:44.289887 1917 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0916 17:15:54.061316 114275 kubeadm.go:310] W0916 17:15:44.290519 1917 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0916 17:15:54.061554 114275 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1068-gcp\n", err: exit status 1
I0916 17:15:54.061656 114275 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
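The --discovery-token-ca-cert-hash in the join commands above is the SHA-256 of the cluster CA's DER-encoded Subject Public Key Info, which joining nodes use to pin the control plane's CA. A Go sketch that recomputes it from the CA certificate written earlier; caCertHash is an illustrative name:
package main

import (
    "crypto/sha256"
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "os"
)

// caCertHash returns the kubeadm-style "sha256:<hex>" pin for a CA cert,
// hashing the certificate's DER-encoded Subject Public Key Info.
func caCertHash(path string) (string, error) {
    raw, err := os.ReadFile(path)
    if err != nil {
        return "", err
    }
    block, _ := pem.Decode(raw)
    if block == nil {
        return "", fmt.Errorf("no PEM data in %s", path)
    }
    cert, err := x509.ParseCertificate(block.Bytes)
    if err != nil {
        return "", err
    }
    sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
    return fmt.Sprintf("sha256:%x", sum), nil
}

func main() {
    h, err := caCertHash("/var/lib/minikube/certs/ca.crt")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println(h)
}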
I0916 17:15:54.061668 114275 cni.go:84] Creating CNI manager for ""
I0916 17:15:54.061681 114275 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0916 17:15:54.063277 114275 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0916 17:15:54.064301 114275 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0916 17:15:54.072623 114275 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0916 17:15:54.088071 114275 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0916 17:15:54.088182 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:54.088215 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-539053 minikube.k8s.io/updated_at=2024_09_16T17_15_54_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=91d692c919753635ac118b7ed7ae5503b67c63c8 minikube.k8s.io/name=addons-539053 minikube.k8s.io/primary=true
I0916 17:15:54.094865 114275 ops.go:34] apiserver oom_adj: -16
I0916 17:15:54.166162 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:54.666977 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:55.166212 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:55.666245 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:56.166224 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:56.666834 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:57.167095 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:57.667002 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:58.166443 114275 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0916 17:15:58.228536 114275 kubeadm.go:1113] duration metric: took 4.140400385s to wait for elevateKubeSystemPrivileges
I0916 17:15:58.228574 114275 kubeadm.go:394] duration metric: took 14.064783803s to StartCluster
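The repeated "kubectl get sa default" runs above, spaced roughly 500ms apart, are a readiness poll for the default ServiceAccount before kube-system privileges are granted. A sketch of that poll via os/exec; the function name and timeout are illustrative, not minikube's:
package main

import (
    "fmt"
    "os/exec"
    "time"
)

// waitForDefaultSA retries "kubectl get sa default" until the controller
// manager has created the default ServiceAccount or the deadline passes.
func waitForDefaultSA(kubeconfig string, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        cmd := exec.Command("kubectl", "--kubeconfig", kubeconfig, "get", "sa", "default")
        if err := cmd.Run(); err == nil {
            return nil
        }
        time.Sleep(500 * time.Millisecond)
    }
    return fmt.Errorf("default service account not ready after %s", timeout)
}

func main() {
    if err := waitForDefaultSA("/var/lib/minikube/kubeconfig", 2*time.Minute); err != nil {
        fmt.Println(err)
    }
}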
I0916 17:15:58.228598 114275 settings.go:142] acquiring lock: {Name:mkb300bd78b7ad126a3cee4c0691e462d6a68687 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:58.228737 114275 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19649-105988/kubeconfig
I0916 17:15:58.229162 114275 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19649-105988/kubeconfig: {Name:mkc274b48c835a365a47726fab379af89963f2b8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0916 17:15:58.229356 114275 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0916 17:15:58.229389 114275 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0916 17:15:58.229446 114275 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0916 17:15:58.229584 114275 addons.go:69] Setting yakd=true in profile "addons-539053"
I0916 17:15:58.229606 114275 addons.go:234] Setting addon yakd=true in "addons-539053"
I0916 17:15:58.229606 114275 addons.go:69] Setting inspektor-gadget=true in profile "addons-539053"
I0916 17:15:58.229621 114275 addons.go:69] Setting storage-provisioner=true in profile "addons-539053"
I0916 17:15:58.229652 114275 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-539053"
I0916 17:15:58.229660 114275 addons.go:69] Setting default-storageclass=true in profile "addons-539053"
I0916 17:15:58.229665 114275 addons.go:234] Setting addon storage-provisioner=true in "addons-539053"
I0916 17:15:58.229671 114275 addons.go:69] Setting volcano=true in profile "addons-539053"
I0916 17:15:58.229675 114275 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-539053"
I0916 17:15:58.229678 114275 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-539053"
I0916 17:15:58.229683 114275 addons.go:234] Setting addon volcano=true in "addons-539053"
I0916 17:15:58.229686 114275 addons.go:69] Setting volumesnapshots=true in profile "addons-539053"
I0916 17:15:58.229690 114275 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-539053"
I0916 17:15:58.229699 114275 addons.go:234] Setting addon volumesnapshots=true in "addons-539053"
I0916 17:15:58.229702 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229704 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229708 114275 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-539053"
I0916 17:15:58.229711 114275 addons.go:69] Setting registry=true in profile "addons-539053"
I0916 17:15:58.229724 114275 addons.go:234] Setting addon registry=true in "addons-539053"
I0916 17:15:58.229730 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229730 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229743 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229621 114275 addons.go:69] Setting gcp-auth=true in profile "addons-539053"
I0916 17:15:58.229849 114275 mustload.go:65] Loading cluster: addons-539053
I0916 17:15:58.230004 114275 config.go:182] Loaded profile config "addons-539053": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0916 17:15:58.230060 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230060 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230242 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230246 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230256 114275 addons.go:69] Setting metrics-server=true in profile "addons-539053"
I0916 17:15:58.230260 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230271 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230272 114275 addons.go:234] Setting addon metrics-server=true in "addons-539053"
I0916 17:15:58.230293 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.230303 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.230763 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.229643 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229638 114275 addons.go:234] Setting addon inspektor-gadget=true in "addons-539053"
I0916 17:15:58.230861 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229643 114275 addons.go:69] Setting cloud-spanner=true in profile "addons-539053"
I0916 17:15:58.231250 114275 addons.go:234] Setting addon cloud-spanner=true in "addons-539053"
I0916 17:15:58.231280 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.229655 114275 addons.go:69] Setting ingress=true in profile "addons-539053"
I0916 17:15:58.231313 114275 addons.go:234] Setting addon ingress=true in "addons-539053"
I0916 17:15:58.231349 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.231359 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.229658 114275 addons.go:69] Setting ingress-dns=true in profile "addons-539053"
I0916 17:15:58.231669 114275 addons.go:234] Setting addon ingress-dns=true in "addons-539053"
I0916 17:15:58.231721 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.232204 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.232562 114275 out.go:177] * Verifying Kubernetes components...
I0916 17:15:58.233076 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.229652 114275 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-539053"
I0916 17:15:58.234062 114275 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-539053"
I0916 17:15:58.234148 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.234764 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.229649 114275 addons.go:69] Setting helm-tiller=true in profile "addons-539053"
I0916 17:15:58.235871 114275 addons.go:234] Setting addon helm-tiller=true in "addons-539053"
I0916 17:15:58.235916 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.236055 114275 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0916 17:15:58.229642 114275 config.go:182] Loaded profile config "addons-539053": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0916 17:15:58.236396 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.231295 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.255569 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.256442 114275 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-539053"
I0916 17:15:58.256481 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.256932 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.263129 114275 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0916 17:15:58.264549 114275 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0916 17:15:58.264573 114275 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0916 17:15:58.264637 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.274847 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.275411 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.277321 114275 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0916 17:15:58.282003 114275 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0916 17:15:58.283351 114275 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0916 17:15:58.283377 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0916 17:15:58.283431 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.283836 114275 addons.go:234] Setting addon default-storageclass=true in "addons-539053"
I0916 17:15:58.283888 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:15:58.284325 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:15:58.284770 114275 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0916 17:15:58.287192 114275 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0916 17:15:58.287321 114275 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0916 17:15:58.288598 114275 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0916 17:15:58.288730 114275 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0916 17:15:58.291234 114275 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0916 17:15:58.291260 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0916 17:15:58.291325 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.291569 114275 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0916 17:15:58.292873 114275 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0916 17:15:58.294231 114275 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0916 17:15:58.295384 114275 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0916 17:15:58.296507 114275 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0916 17:15:58.297664 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0916 17:15:58.297682 114275 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0916 17:15:58.297740 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.322820 114275 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.23
I0916 17:15:58.322984 114275 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
I0916 17:15:58.324497 114275 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0916 17:15:58.324543 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0916 17:15:58.324606 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.325002 114275 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0916 17:15:58.325021 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0916 17:15:58.325070 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.331717 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.332838 114275 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0916 17:15:58.333804 114275 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0916 17:15:58.333823 114275 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0916 17:15:58.333875 114275 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0916 17:15:58.335634 114275 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0916 17:15:58.335655 114275 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0916 17:15:58.335701 114275 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0916 17:15:58.335721 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.335733 114275 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0916 17:15:58.335758 114275 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0916 17:15:58.335822 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.335983 114275 out.go:177] - Using image docker.io/registry:2.8.3
I0916 17:15:58.336970 114275 out.go:177] - Using image docker.io/busybox:stable
I0916 17:15:58.338251 114275 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0916 17:15:58.338351 114275 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0916 17:15:58.338366 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0916 17:15:58.338387 114275 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0916 17:15:58.338404 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0916 17:15:58.338419 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.338462 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.340666 114275 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0916 17:15:58.342274 114275 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0916 17:15:58.342300 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0916 17:15:58.342352 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.344325 114275 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0916 17:15:58.345656 114275 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0916 17:15:58.345672 114275 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0916 17:15:58.345724 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.345644 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.354231 114275 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0916 17:15:58.354253 114275 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0916 17:15:58.354306 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.354646 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.366057 114275 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0916 17:15:58.367486 114275 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0916 17:15:58.367506 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0916 17:15:58.367560 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.371128 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.386908 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.396288 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.400986 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.401388 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.401743 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.402394 114275 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0916 17:15:58.403659 114275 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0916 17:15:58.403676 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0916 17:15:58.403734 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:15:58.408380 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.410854 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.411213 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.412442 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.422554 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:15:58.423283 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
W0916 17:15:58.450239 114275 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0916 17:15:58.450335 114275 retry.go:31] will retry after 161.166086ms: ssh: handshake failed: EOF
I0916 17:15:58.553333 114275 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0916 17:15:58.553477 114275 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0916 17:15:58.758279 114275 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0916 17:15:58.758372 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0916 17:15:58.766927 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0916 17:15:58.854520 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0916 17:15:58.964121 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0916 17:15:58.964220 114275 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0916 17:15:58.969170 114275 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0916 17:15:58.969191 114275 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0916 17:15:59.060112 114275 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0916 17:15:59.060154 114275 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0916 17:15:59.061575 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0916 17:15:59.062635 114275 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0916 17:15:59.062660 114275 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0916 17:15:59.148947 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0916 17:15:59.151431 114275 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0916 17:15:59.151459 114275 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0916 17:15:59.152354 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0916 17:15:59.162092 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0916 17:15:59.168023 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0916 17:15:59.168047 114275 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0916 17:15:59.248152 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0916 17:15:59.248284 114275 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0916 17:15:59.248296 114275 ssh_runner.go:362] scp helm-tiller/helm-tiller-rbac.yaml --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0916 17:15:59.261070 114275 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0916 17:15:59.261094 114275 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0916 17:15:59.351018 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0916 17:15:59.356787 114275 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0916 17:15:59.356818 114275 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0916 17:15:59.358072 114275 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0916 17:15:59.358101 114275 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0916 17:15:59.364853 114275 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0916 17:15:59.364877 114275 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0916 17:15:59.450119 114275 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0916 17:15:59.450204 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0916 17:15:59.548311 114275 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0916 17:15:59.548341 114275 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0916 17:15:59.556366 114275 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0916 17:15:59.556397 114275 ssh_runner.go:362] scp helm-tiller/helm-tiller-svc.yaml --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0916 17:15:59.567166 114275 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0916 17:15:59.567196 114275 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0916 17:15:59.570082 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0916 17:15:59.646733 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0916 17:15:59.646774 114275 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0916 17:15:59.766463 114275 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0916 17:15:59.766550 114275 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0916 17:15:59.948954 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0916 17:16:00.156079 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0916 17:16:00.166651 114275 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.61313512s)
I0916 17:16:00.167716 114275 node_ready.go:35] waiting up to 6m0s for node "addons-539053" to be "Ready" ...
I0916 17:16:00.167980 114275 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.614608844s)
I0916 17:16:00.168009 114275 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0916 17:16:00.170431 114275 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0916 17:16:00.170455 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0916 17:16:00.249698 114275 node_ready.go:49] node "addons-539053" has status "Ready":"True"
I0916 17:16:00.249738 114275 node_ready.go:38] duration metric: took 81.979855ms for node "addons-539053" to be "Ready" ...
I0916 17:16:00.249754 114275 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0916 17:16:00.260536 114275 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace to be "Ready" ...
I0916 17:16:00.268953 114275 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0916 17:16:00.269047 114275 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0916 17:16:00.546856 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0916 17:16:00.546891 114275 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0916 17:16:00.558539 114275 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0916 17:16:00.558568 114275 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0916 17:16:00.748688 114275 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-539053" context rescaled to 1 replicas
I0916 17:16:00.851126 114275 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0916 17:16:00.851220 114275 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0916 17:16:01.064374 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0916 17:16:01.252735 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0916 17:16:01.252822 114275 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0916 17:16:01.347799 114275 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0916 17:16:01.347843 114275 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0916 17:16:01.567646 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (2.800608497s)
I0916 17:16:01.746613 114275 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0916 17:16:01.746760 114275 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0916 17:16:01.850264 114275 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0916 17:16:01.850303 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0916 17:16:01.961143 114275 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0916 17:16:01.961188 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0916 17:16:02.155656 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0916 17:16:02.266640 114275 pod_ready.go:103] pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace has status "Ready":"False"
I0916 17:16:02.356134 114275 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0916 17:16:02.356226 114275 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0916 17:16:02.667758 114275 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0916 17:16:02.667800 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0916 17:16:03.050023 114275 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0916 17:16:03.050054 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0916 17:16:03.257779 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0916 17:16:03.261496 114275 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0916 17:16:03.261649 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0916 17:16:04.066385 114275 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0916 17:16:04.066487 114275 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0916 17:16:04.161477 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (5.306911431s)
I0916 17:16:04.161963 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (5.100341288s)
I0916 17:16:04.349226 114275 pod_ready.go:103] pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace has status "Ready":"False"
I0916 17:16:04.465727 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0916 17:16:05.263268 114275 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0916 17:16:05.263421 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:16:05.283493 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:16:06.347763 114275 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0916 17:16:06.658673 114275 addons.go:234] Setting addon gcp-auth=true in "addons-539053"
I0916 17:16:06.658776 114275 host.go:66] Checking if "addons-539053" exists ...
I0916 17:16:06.659323 114275 cli_runner.go:164] Run: docker container inspect addons-539053 --format={{.State.Status}}
I0916 17:16:06.681496 114275 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0916 17:16:06.681547 114275 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-539053
I0916 17:16:06.696551 114275 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19649-105988/.minikube/machines/addons-539053/id_rsa Username:docker}
I0916 17:16:06.768761 114275 pod_ready.go:103] pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace has status "Ready":"False"
I0916 17:16:08.070980 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (8.921987188s)
I0916 17:16:08.071131 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (8.822954297s)
I0916 17:16:08.071150 114275 addons.go:475] Verifying addon ingress=true in "addons-539053"
I0916 17:16:08.071048 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (8.918659237s)
I0916 17:16:08.071093 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (8.908979389s)
I0916 17:16:08.072488 114275 out.go:177] * Verifying ingress addon...
I0916 17:16:08.074317 114275 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0916 17:16:08.150348 114275 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0916 17:16:08.150445 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:08.581151 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:09.153378 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:09.267620 114275 pod_ready.go:103] pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace has status "Ready":"False"
I0916 17:16:09.653976 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:10.156408 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:10.651562 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:10.952109 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (11.601045056s)
I0916 17:16:10.952402 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (10.796224349s)
I0916 17:16:10.952460 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (9.887995671s)
I0916 17:16:10.952555 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (8.796793939s)
W0916 17:16:10.953229 114275 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0916 17:16:10.953270 114275 retry.go:31] will retry after 166.830445ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0916 17:16:10.952631 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (7.694760678s)
I0916 17:16:10.952785 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (11.003279335s)
I0916 17:16:10.953348 114275 addons.go:475] Verifying addon registry=true in "addons-539053"
I0916 17:16:10.952916 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (11.382148612s)
I0916 17:16:10.953682 114275 addons.go:475] Verifying addon metrics-server=true in "addons-539053"
I0916 17:16:10.955237 114275 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-539053 service yakd-dashboard -n yakd-dashboard
I0916 17:16:10.956063 114275 out.go:177] * Verifying registry addon...
I0916 17:16:10.958147 114275 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0916 17:16:10.962460 114275 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0916 17:16:10.962479 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:11.120505 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0916 17:16:11.155489 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:11.463325 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:11.657125 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:11.768589 114275 pod_ready.go:98] pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:16:11 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.49.2 HostIPs:[{IP:192.168.49.2}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-09-16 17:15:58 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-09-16 17:16:02 +0000 UTC,FinishedAt:2024-09-16 17:16:09 +0000 UTC,ContainerID:docker://532fd6f586dd17aef593b85bab83ad072e43ac5788f8b0d8e0c848392d3fb04e,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.3 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:9caabbf6238b189a65d0d6e6ac138de60d6a1c419e5a341fbbb7c78382559c6e ContainerID:docker://532fd6f586dd17aef593b85bab83ad072e43ac5788f8b0d8e0c848392d3fb04e Started:0xc00227c1e0 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0xc00223eb50} {Name:kube-api-access-x7lq2 MountPath:/var/run/secrets/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0xc00223eb60}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0916 17:16:11.768632 114275 pod_ready.go:82] duration metric: took 11.507969529s for pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace to be "Ready" ...
E0916 17:16:11.768647 114275 pod_ready.go:67] WaitExtra: waitPodCondition: pod "coredns-7c65d6cfc9-sx2j8" in "kube-system" namespace has status phase "Succeeded" (skipping!): {Phase:Succeeded Conditions:[{Type:PodReadyToStartContainers Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:16:11 +0000 UTC Reason: Message:} {Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason:PodCompleted Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason:PodCompleted Message:} {Type:ContainersReady Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason:PodCompleted Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2024-09-16 17:15:58 +0000 UTC Reason: Message:}] Message: Reason: NominatedNodeName: HostIP:192.168.49.2 HostIPs:[{IP:192.168.49.2}] PodIP:10.244.0.2 PodIPs:[{IP:10.244.0.2}] StartTime:2024-09-16 17:15:58 +0000 UTC InitContainerStatuses:[] ContainerStatuses:[{Name:coredns State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:0,Signal:0,Reason:Completed,Message:,StartedAt:2024-09-16 17:16:02 +0000 UTC,FinishedAt:2024-09-16 17:16:09 +0000 UTC,ContainerID:docker://532fd6f586dd17aef593b85bab83ad072e43ac5788f8b0d8e0c848392d3fb04e,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:registry.k8s.io/coredns/coredns:v1.11.3 ImageID:docker-pullable://registry.k8s.io/coredns/coredns@sha256:9caabbf6238b189a65d0d6e6ac138de60d6a1c419e5a341fbbb7c78382559c6e ContainerID:docker://532fd6f586dd17aef593b85bab83ad072e43ac5788f8b0d8e0c848392d3fb04e Started:0xc00227c1e0 AllocatedResources:map[] Resources:nil VolumeMounts:[{Name:config-volume MountPath:/etc/coredns ReadOnly:true RecursiveReadOnly:0xc00223eb50} {Name:kube-api-access-x7lq2 MountPath:/var/run/secrets/kubernetes.io/serviceaccount ReadOnly:true RecursiveReadOnly:0xc00223eb60}] User:nil AllocatedResourcesStatus:[]}] QOSClass:Burstable EphemeralContainerStatuses:[] Resize: ResourceClaimStatuses:[]}
I0916 17:16:11.768659 114275 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-wnhjq" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.850327 114275 pod_ready.go:93] pod "coredns-7c65d6cfc9-wnhjq" in "kube-system" namespace has status "Ready":"True"
I0916 17:16:11.850360 114275 pod_ready.go:82] duration metric: took 81.69113ms for pod "coredns-7c65d6cfc9-wnhjq" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.850374 114275 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.856131 114275 pod_ready.go:93] pod "etcd-addons-539053" in "kube-system" namespace has status "Ready":"True"
I0916 17:16:11.856161 114275 pod_ready.go:82] duration metric: took 5.778739ms for pod "etcd-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.856189 114275 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.864310 114275 pod_ready.go:93] pod "kube-apiserver-addons-539053" in "kube-system" namespace has status "Ready":"True"
I0916 17:16:11.864396 114275 pod_ready.go:82] duration metric: took 8.195217ms for pod "kube-apiserver-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.864427 114275 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.949910 114275 pod_ready.go:93] pod "kube-controller-manager-addons-539053" in "kube-system" namespace has status "Ready":"True"
I0916 17:16:11.950027 114275 pod_ready.go:82] duration metric: took 85.576621ms for pod "kube-controller-manager-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.950092 114275 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-bbn89" in "kube-system" namespace to be "Ready" ...
I0916 17:16:11.962653 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:11.973866 114275 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (5.292343012s)
I0916 17:16:11.974099 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (7.508031302s)
I0916 17:16:11.974144 114275 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-539053"
I0916 17:16:11.976300 114275 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0916 17:16:11.976310 114275 out.go:177] * Verifying csi-hostpath-driver addon...
I0916 17:16:11.977780 114275 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0916 17:16:11.978646 114275 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0916 17:16:11.978885 114275 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0916 17:16:11.978901 114275 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0916 17:16:12.050785 114275 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0916 17:16:12.050815 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:12.149845 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:12.152680 114275 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0916 17:16:12.152703 114275 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0916 17:16:12.164823 114275 pod_ready.go:93] pod "kube-proxy-bbn89" in "kube-system" namespace has status "Ready":"True"
I0916 17:16:12.164850 114275 pod_ready.go:82] duration metric: took 214.728667ms for pod "kube-proxy-bbn89" in "kube-system" namespace to be "Ready" ...
I0916 17:16:12.164863 114275 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:12.178787 114275 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0916 17:16:12.178812 114275 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0916 17:16:12.265022 114275 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0916 17:16:12.462212 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:12.564283 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:12.565231 114275 pod_ready.go:93] pod "kube-scheduler-addons-539053" in "kube-system" namespace has status "Ready":"True"
I0916 17:16:12.565256 114275 pod_ready.go:82] duration metric: took 400.384397ms for pod "kube-scheduler-addons-539053" in "kube-system" namespace to be "Ready" ...
I0916 17:16:12.565267 114275 pod_ready.go:39] duration metric: took 12.315483377s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0916 17:16:12.565292 114275 api_server.go:52] waiting for apiserver process to appear ...
I0916 17:16:12.565358 114275 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0916 17:16:12.579458 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:12.963319 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:13.050480 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:13.149704 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:13.462760 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:13.563585 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:13.656691 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.536136379s)
I0916 17:16:13.658384 114275 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.393321536s)
I0916 17:16:13.658427 114275 ssh_runner.go:235] Completed: sudo pgrep -xnf kube-apiserver.*minikube.*: (1.09304533s)
I0916 17:16:13.658467 114275 api_server.go:72] duration metric: took 15.429047794s to wait for apiserver process to appear ...
I0916 17:16:13.658477 114275 api_server.go:88] waiting for apiserver healthz status ...
I0916 17:16:13.658501 114275 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0916 17:16:13.660310 114275 addons.go:475] Verifying addon gcp-auth=true in "addons-539053"
I0916 17:16:13.663254 114275 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0916 17:16:13.663435 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:13.664089 114275 out.go:177] * Verifying gcp-auth addon...
I0916 17:16:13.664335 114275 api_server.go:141] control plane version: v1.31.1
I0916 17:16:13.664413 114275 api_server.go:131] duration metric: took 5.926461ms to wait for apiserver health ...
I0916 17:16:13.664427 114275 system_pods.go:43] waiting for kube-system pods to appear ...
I0916 17:16:13.666361 114275 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0916 17:16:13.762776 114275 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0916 17:16:13.769572 114275 system_pods.go:59] 18 kube-system pods found
I0916 17:16:13.769608 114275 system_pods.go:61] "coredns-7c65d6cfc9-wnhjq" [4245cb95-20b3-46fb-aed8-179d0f82e5d7] Running
I0916 17:16:13.769621 114275 system_pods.go:61] "csi-hostpath-attacher-0" [d988b7d5-120c-40e5-81c7-be8d1ed5f1ce] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0916 17:16:13.769632 114275 system_pods.go:61] "csi-hostpath-resizer-0" [a718b743-e8df-48b9-9179-a2d18a674210] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0916 17:16:13.769641 114275 system_pods.go:61] "csi-hostpathplugin-z7svh" [e382009f-305c-4940-934c-eefa26c102c2] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0916 17:16:13.769651 114275 system_pods.go:61] "etcd-addons-539053" [e815e4d8-c792-4ee9-b17b-ea384470c094] Running
I0916 17:16:13.769657 114275 system_pods.go:61] "kube-apiserver-addons-539053" [978344af-4506-4d7b-904f-45e4d181fb39] Running
I0916 17:16:13.769667 114275 system_pods.go:61] "kube-controller-manager-addons-539053" [c4755a4d-9e2b-49f4-ad5c-4da6a55139a6] Running
I0916 17:16:13.769679 114275 system_pods.go:61] "kube-ingress-dns-minikube" [cf1503c1-0d6b-4055-b323-9deca4c25d13] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0916 17:16:13.769687 114275 system_pods.go:61] "kube-proxy-bbn89" [e069094e-be35-4245-9ad9-c15c0632aaf3] Running
I0916 17:16:13.769694 114275 system_pods.go:61] "kube-scheduler-addons-539053" [96e3dac4-5f35-4db3-ac7e-9f02a07dd492] Running
I0916 17:16:13.769705 114275 system_pods.go:61] "metrics-server-84c5f94fbc-26pvn" [27fac5e6-437c-408b-b822-b4e2b393d323] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0916 17:16:13.769719 114275 system_pods.go:61] "nvidia-device-plugin-daemonset-vz86s" [aff62fc3-161b-49c5-9c01-0794bb7a44ae] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
I0916 17:16:13.769732 114275 system_pods.go:61] "registry-66c9cd494c-6df5h" [4849ea19-88f6-4fbc-ba0f-e290ee2d0d80] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0916 17:16:13.769743 114275 system_pods.go:61] "registry-proxy-9tc94" [556af332-2257-4db0-adcb-aca469cf992d] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0916 17:16:13.769755 114275 system_pods.go:61] "snapshot-controller-56fcc65765-mj4qq" [6755c5e5-4ee2-41ef-b33b-721fa04ab9b4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0916 17:16:13.769768 114275 system_pods.go:61] "snapshot-controller-56fcc65765-mj66x" [88fabcb5-0d8c-41ba-8ebe-b7267c1c5381] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0916 17:16:13.769775 114275 system_pods.go:61] "storage-provisioner" [25007d02-d9ce-4f49-b276-c7bd60bf81eb] Running
I0916 17:16:13.769784 114275 system_pods.go:61] "tiller-deploy-b48cc5f79-42m8q" [f1e7c66d-de05-47ef-b306-073ce6ee059d] Pending / Ready:ContainersNotReady (containers with unready status: [tiller]) / ContainersReady:ContainersNotReady (containers with unready status: [tiller])
I0916 17:16:13.769794 114275 system_pods.go:74] duration metric: took 105.359796ms to wait for pod list to return data ...
I0916 17:16:13.769807 114275 default_sa.go:34] waiting for default service account to be created ...
I0916 17:16:13.772253 114275 default_sa.go:45] found service account: "default"
I0916 17:16:13.772276 114275 default_sa.go:55] duration metric: took 2.46165ms for default service account to be created ...
I0916 17:16:13.772286 114275 system_pods.go:116] waiting for k8s-apps to be running ...
I0916 17:16:13.781245 114275 system_pods.go:86] 18 kube-system pods found
I0916 17:16:13.781277 114275 system_pods.go:89] "coredns-7c65d6cfc9-wnhjq" [4245cb95-20b3-46fb-aed8-179d0f82e5d7] Running
I0916 17:16:13.781290 114275 system_pods.go:89] "csi-hostpath-attacher-0" [d988b7d5-120c-40e5-81c7-be8d1ed5f1ce] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0916 17:16:13.781300 114275 system_pods.go:89] "csi-hostpath-resizer-0" [a718b743-e8df-48b9-9179-a2d18a674210] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0916 17:16:13.781313 114275 system_pods.go:89] "csi-hostpathplugin-z7svh" [e382009f-305c-4940-934c-eefa26c102c2] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0916 17:16:13.781327 114275 system_pods.go:89] "etcd-addons-539053" [e815e4d8-c792-4ee9-b17b-ea384470c094] Running
I0916 17:16:13.781336 114275 system_pods.go:89] "kube-apiserver-addons-539053" [978344af-4506-4d7b-904f-45e4d181fb39] Running
I0916 17:16:13.781347 114275 system_pods.go:89] "kube-controller-manager-addons-539053" [c4755a4d-9e2b-49f4-ad5c-4da6a55139a6] Running
I0916 17:16:13.781357 114275 system_pods.go:89] "kube-ingress-dns-minikube" [cf1503c1-0d6b-4055-b323-9deca4c25d13] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0916 17:16:13.781364 114275 system_pods.go:89] "kube-proxy-bbn89" [e069094e-be35-4245-9ad9-c15c0632aaf3] Running
I0916 17:16:13.781374 114275 system_pods.go:89] "kube-scheduler-addons-539053" [96e3dac4-5f35-4db3-ac7e-9f02a07dd492] Running
I0916 17:16:13.781384 114275 system_pods.go:89] "metrics-server-84c5f94fbc-26pvn" [27fac5e6-437c-408b-b822-b4e2b393d323] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0916 17:16:13.781398 114275 system_pods.go:89] "nvidia-device-plugin-daemonset-vz86s" [aff62fc3-161b-49c5-9c01-0794bb7a44ae] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
I0916 17:16:13.781408 114275 system_pods.go:89] "registry-66c9cd494c-6df5h" [4849ea19-88f6-4fbc-ba0f-e290ee2d0d80] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0916 17:16:13.781421 114275 system_pods.go:89] "registry-proxy-9tc94" [556af332-2257-4db0-adcb-aca469cf992d] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0916 17:16:13.781433 114275 system_pods.go:89] "snapshot-controller-56fcc65765-mj4qq" [6755c5e5-4ee2-41ef-b33b-721fa04ab9b4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0916 17:16:13.781448 114275 system_pods.go:89] "snapshot-controller-56fcc65765-mj66x" [88fabcb5-0d8c-41ba-8ebe-b7267c1c5381] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0916 17:16:13.781459 114275 system_pods.go:89] "storage-provisioner" [25007d02-d9ce-4f49-b276-c7bd60bf81eb] Running
I0916 17:16:13.781469 114275 system_pods.go:89] "tiller-deploy-b48cc5f79-42m8q" [f1e7c66d-de05-47ef-b306-073ce6ee059d] Pending / Ready:ContainersNotReady (containers with unready status: [tiller]) / ContainersReady:ContainersNotReady (containers with unready status: [tiller])
I0916 17:16:13.781482 114275 system_pods.go:126] duration metric: took 9.188191ms to wait for k8s-apps to be running ...
I0916 17:16:13.781496 114275 system_svc.go:44] waiting for kubelet service to be running ....
I0916 17:16:13.781551 114275 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0916 17:16:13.794622 114275 system_svc.go:56] duration metric: took 13.119825ms WaitForService to wait for kubelet
I0916 17:16:13.794654 114275 kubeadm.go:582] duration metric: took 15.565233811s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
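The kubelet service wait above leans on systemd exit codes: systemctl is-active --quiet prints nothing and exits 0 only when the unit is active. A rough sketch of that check run locally (the log runs it over SSH inside the node; the polling loop and timeout here are illustrative):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// kubeletActive reports whether the kubelet systemd unit is active.
// The --quiet flag suppresses output, so the state is conveyed purely
// by the exit code (0 = active, non-zero otherwise).
func kubeletActive() bool {
	return exec.Command("sudo", "systemctl", "is-active", "--quiet", "kubelet").Run() == nil
}

// waitForKubelet polls until the unit is active or the timeout expires.
func waitForKubelet(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if kubeletActive() {
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("kubelet not active within %s", timeout)
}

func main() {
	if err := waitForKubelet(30 * time.Second); err != nil {
		fmt.Println(err)
	}
}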
I0916 17:16:13.794677 114275 node_conditions.go:102] verifying NodePressure condition ...
I0916 17:16:13.797505 114275 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0916 17:16:13.797533 114275 node_conditions.go:123] node cpu capacity is 8
I0916 17:16:13.797551 114275 node_conditions.go:105] duration metric: took 2.866224ms to run NodePressure ...
I0916 17:16:13.797565 114275 start.go:241] waiting for startup goroutines ...
I0916 17:16:13.962231 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:13.983769 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:14.078184 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:14.462907 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:14.482993 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:14.578883 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:14.962454 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:14.982721 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:15.078613 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:15.462247 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:15.483254 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:15.579298 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:15.963102 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:15.983705 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:16.078527 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:16.462749 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:16.482791 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:16.578398 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:16.961893 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:16.983869 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:17.078542 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:17.462394 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:17.482341 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:17.633183 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:17.962438 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:17.983370 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:18.079311 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:18.461448 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:18.483104 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:18.578692 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:18.962473 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:18.982714 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:19.077998 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:19.461850 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:19.482540 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:19.577871 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:19.961691 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:19.983013 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:20.077967 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:20.461806 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:20.482839 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:20.578336 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:20.962540 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:20.982235 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:21.078036 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:21.462499 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:21.482741 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:21.578348 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:21.962481 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:21.982720 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:22.078546 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:22.462139 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:22.483569 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:22.578227 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:22.962577 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:22.982899 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:23.079097 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:23.462926 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:23.483727 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:23.578281 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:23.962954 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:24.065416 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:24.078467 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:24.462536 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:24.482359 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:24.578733 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:24.962481 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:24.982250 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:25.077537 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:25.461543 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:25.483276 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:25.578269 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:25.962034 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:25.983175 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:26.078490 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:26.462196 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:26.482697 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:26.578314 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:26.961872 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:26.983177 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:27.078475 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:27.461746 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:27.482528 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:27.577890 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:27.960966 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:27.982914 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:28.078060 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:28.462256 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:28.482875 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:28.578198 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:28.962781 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:28.982936 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:29.078505 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:29.462795 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:29.482915 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:29.578230 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:29.961807 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:29.982679 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:30.078050 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:30.461972 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:30.482603 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:30.578304 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:30.961797 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:30.982757 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:31.078272 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:31.462725 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:31.482708 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:31.579087 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:31.962334 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:31.982975 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:32.078703 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:32.462330 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:32.522551 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:32.577946 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:32.961933 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:32.983020 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:33.078451 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:33.462446 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:33.482265 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:33.577480 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:33.961946 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:33.982932 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:34.078181 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:34.462080 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:34.483334 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:34.577669 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:34.961639 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:34.982507 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:35.078017 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:35.462721 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:35.483140 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:35.579193 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:35.961303 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:35.982714 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:36.078023 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:36.461970 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:36.482637 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:36.579530 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:36.961899 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:36.983597 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:37.078895 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:37.462454 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:37.482726 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:37.578817 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:37.962696 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:38.064502 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:38.077685 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:38.462019 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:38.482872 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:38.578282 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:38.961734 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:38.982825 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:39.078754 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:39.461434 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:39.482191 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:39.578582 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:39.963044 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:39.983703 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:40.078868 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:40.462644 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:40.483584 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:40.579087 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:40.962620 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:40.983108 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:41.078442 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:41.462575 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:41.600507 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:41.600580 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:41.961866 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:41.983128 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:42.078733 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:42.462257 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:42.483467 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:42.578935 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:42.961920 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:42.982878 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:43.078249 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:43.461281 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:43.482534 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:43.578704 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:43.962133 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:43.982816 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:44.078952 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:44.461773 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:44.482928 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:44.578290 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:44.961576 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:44.982226 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:45.078709 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:45.462385 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:45.482569 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:45.578527 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:45.962565 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:45.983014 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:46.078220 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:46.461863 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:46.482907 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:46.578400 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:46.961016 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:46.983007 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:47.078003 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:47.462179 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:47.482384 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:47.579372 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:47.962438 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:47.983133 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:48.078864 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:48.462142 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:48.483698 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:48.579883 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:48.962432 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:48.982597 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:49.078722 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:49.462503 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:49.483067 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:49.579032 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:49.961929 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:49.983467 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:50.079161 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:50.462327 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:50.482527 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:50.577912 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:50.961721 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:50.982654 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:51.078046 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:51.462333 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0916 17:16:51.482273 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:51.577803 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:51.962011 114275 kapi.go:107] duration metric: took 41.003859416s to wait for kubernetes.io/minikube-addons=registry ...
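The repeated kapi.go:96 lines are one poll loop per addon label selector: list pods matching the label, log the current phase, and stop once a pod reaches Running (about 41s for the registry selector above). A condensed client-go sketch of that pattern, assuming kubeconfig access to the same cluster; the helper name and the phase-only readiness test are illustrative, not minikube's own logic:

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForLabel polls pods matching selector in ns until one reports Running.
func waitForLabel(ctx context.Context, cs kubernetes.Interface, ns, selector string) error {
	for {
		pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err == nil {
			for _, p := range pods.Items {
				if p.Status.Phase == corev1.PodRunning {
					return nil
				}
				fmt.Printf("waiting for pod %q, current state: %s\n", selector, p.Status.Phase)
			}
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for %s: %w", selector, ctx.Err())
		case <-time.After(500 * time.Millisecond):
		}
	}
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
	defer cancel()
	if err := waitForLabel(ctx, cs, "kube-system", "kubernetes.io/minikube-addons=registry"); err != nil {
		fmt.Println(err)
	}
}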
I0916 17:16:51.982779 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:52.078054 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:52.483652 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:52.579398 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:52.983507 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:53.084011 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:53.483573 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:53.579076 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:53.983352 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:54.077879 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:54.482186 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:54.578566 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:54.983013 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:55.083118 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:55.484598 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:55.578997 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:55.982754 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:56.078302 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:56.483440 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:56.578810 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:56.983348 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:57.078155 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:57.483166 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:57.578713 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:57.984333 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:58.078378 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:58.483254 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:58.577735 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:58.982551 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:59.078195 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:59.482582 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:16:59.577927 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:16:59.983426 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:00.077763 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:00.483871 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:00.579028 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:00.983566 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:01.078431 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:01.483533 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:01.577992 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:01.983578 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:02.079330 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:02.484505 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:02.584228 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:02.983438 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:03.078542 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:03.483110 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:03.578667 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:03.983322 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:04.078748 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:04.551846 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:04.578478 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:04.984099 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:05.078967 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:05.483821 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:05.578442 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:05.985824 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:06.085274 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:06.482915 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:06.578286 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:06.982801 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:07.078413 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:07.482349 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:07.578108 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:07.983395 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:08.078523 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:08.483979 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:08.578727 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:08.983956 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:09.078635 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:09.483716 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:09.603657 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:09.982861 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:10.082925 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:10.482296 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:10.578543 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:10.983226 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:11.078143 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:11.482678 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:11.578699 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:11.983564 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:12.079092 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:12.485317 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:12.579231 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:12.983732 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:13.078565 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:13.482520 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:13.577976 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:13.982645 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:14.077861 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:14.482278 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:14.578004 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:14.984166 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:15.078857 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:15.484298 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:15.579403 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:15.983870 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:16.078667 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:16.483104 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:16.579286 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:16.983828 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:17.084323 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:17.483361 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:17.582010 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:17.984228 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:18.084269 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:18.483634 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:18.579191 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:18.982859 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:19.083388 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:19.483356 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:19.578825 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:19.983578 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:20.078244 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:20.484596 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:20.578834 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:20.983817 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:21.078032 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:21.483335 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:21.578994 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:21.982543 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:22.078223 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:22.483954 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:22.583983 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:22.984968 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:23.078718 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:23.482514 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:23.578451 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:23.984093 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:24.078647 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:24.483481 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:24.579178 114275 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0916 17:17:25.049566 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:25.078409 114275 kapi.go:107] duration metric: took 1m17.004084413s to wait for app.kubernetes.io/name=ingress-nginx ...
I0916 17:17:25.482991 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:25.984150 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:26.484246 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:26.983927 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:27.483517 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:27.983590 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:28.483749 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:28.984014 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:29.484311 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:29.982884 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0916 17:17:30.482601 114275 kapi.go:107] duration metric: took 1m18.503954795s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0916 17:17:36.170512 114275 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0916 17:17:36.170534 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:36.670451 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:37.169058 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:37.670186 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:38.170398 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:38.669314 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:39.169063 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:39.670386 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:40.170198 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:40.669698 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:41.169816 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:41.669990 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:42.169931 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:42.670426 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:43.170255 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:43.670136 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:44.169916 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:44.669597 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:45.169475 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:45.669370 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:46.170232 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:46.670224 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:47.169031 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:47.669746 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:48.169803 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:48.669903 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:49.169778 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:49.669391 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:50.170208 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:50.669788 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:51.169686 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:51.669769 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:52.170444 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:52.669820 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:53.169611 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:53.669659 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:54.169947 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:54.669741 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:55.169852 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:55.669746 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:56.169505 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:56.669820 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:57.169896 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:57.669972 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:58.170510 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:58.669999 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:59.169671 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:17:59.669379 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:00.170140 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:00.670006 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:01.169955 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:01.670342 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:02.170661 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:02.670338 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:03.169750 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:03.669809 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:04.169848 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:04.669541 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:05.169351 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:05.669743 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:06.170140 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:06.669947 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:07.169977 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:07.670273 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:08.170092 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:08.669606 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:09.169457 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:09.669304 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:10.170261 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:10.669805 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:11.170187 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:11.670019 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:12.170333 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:12.670156 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:13.169866 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:13.669707 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:14.170129 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:14.669902 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:15.169837 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:15.669876 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:16.170225 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:16.670491 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:17.169454 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:17.670329 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:18.170496 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:18.669562 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:19.169325 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:19.669701 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:20.170144 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:20.669831 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:21.169779 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:21.669952 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:22.170438 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:22.670438 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:23.170005 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:23.669882 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:24.170219 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:24.669951 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:25.169767 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:25.669655 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:26.169876 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:26.670255 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:27.169755 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:27.669516 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:28.169738 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:28.671547 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:29.169462 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:29.669294 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:30.170113 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:30.669791 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:31.169850 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:31.670060 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:32.170436 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:32.671115 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:33.169976 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:33.670051 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:34.170441 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:34.669849 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:35.169722 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:35.669675 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:36.169772 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:36.669697 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:37.170280 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:37.670298 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:38.170153 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:38.669055 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:39.169981 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:39.670187 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:40.170198 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:40.670164 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:41.170535 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:41.670437 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:42.170472 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:42.670438 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:43.170014 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:43.697546 114275 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0916 17:18:44.169429 114275 kapi.go:107] duration metric: took 2m30.50306224s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0916 17:18:44.171023 114275 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-539053 cluster.
I0916 17:18:44.172480 114275 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0916 17:18:44.173733 114275 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0916 17:18:44.175110 114275 out.go:177] * Enabled addons: ingress-dns, storage-provisioner, storage-provisioner-rancher, nvidia-device-plugin, cloud-spanner, default-storageclass, volcano, helm-tiller, inspektor-gadget, metrics-server, yakd, volumesnapshots, registry, ingress, csi-hostpath-driver, gcp-auth
I0916 17:18:44.176398 114275 addons.go:510] duration metric: took 2m45.946962174s for enable addons: enabled=[ingress-dns storage-provisioner storage-provisioner-rancher nvidia-device-plugin cloud-spanner default-storageclass volcano helm-tiller inspektor-gadget metrics-server yakd volumesnapshots registry ingress csi-hostpath-driver gcp-auth]
I0916 17:18:44.176438 114275 start.go:246] waiting for cluster config update ...
I0916 17:18:44.176461 114275 start.go:255] writing updated cluster config ...
I0916 17:18:44.176757 114275 ssh_runner.go:195] Run: rm -f paused
I0916 17:18:44.228658 114275 start.go:600] kubectl: 1.31.1, cluster: 1.31.1 (minor skew: 0)
I0916 17:18:44.230455 114275 out.go:177] * Done! kubectl is now configured to use "addons-539053" cluster and "default" namespace by default
==> Docker <==
Sep 16 17:28:24 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:24Z" level=error msg="Set backoffDuration to : 1m0s for container ID '47129abf88469d14af7141e84fab19c6666750b545c5b0631f83ea9c8e5b2880'"
Sep 16 17:28:24 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:24Z" level=error msg="error getting RW layer size for container ID 'f1e92bed97f1ae8e3f53c54b614cdf5392b2a2c708713ccca5cce82d871ab399': Error response from daemon: No such container: f1e92bed97f1ae8e3f53c54b614cdf5392b2a2c708713ccca5cce82d871ab399"
Sep 16 17:28:24 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:24Z" level=error msg="Set backoffDuration to : 1m0s for container ID 'f1e92bed97f1ae8e3f53c54b614cdf5392b2a2c708713ccca5cce82d871ab399'"
Sep 16 17:28:24 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:24Z" level=error msg="error getting RW layer size for container ID '0505e3bc339fa5be4a8f2a0ebc44e15018e7f7c97fc9cb77f62c767415a17180': Error response from daemon: No such container: 0505e3bc339fa5be4a8f2a0ebc44e15018e7f7c97fc9cb77f62c767415a17180"
Sep 16 17:28:24 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:24Z" level=error msg="Set backoffDuration to : 1m0s for container ID '0505e3bc339fa5be4a8f2a0ebc44e15018e7f7c97fc9cb77f62c767415a17180'"
Sep 16 17:28:26 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:26Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/df3cb3216e7b2532f5419de5758bfe5fde68056c58729526d4e36a7632ed9e12/resolv.conf as [nameserver 10.96.0.10 search headlamp.svc.cluster.local svc.cluster.local cluster.local southamerica-west1-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 16 17:28:26 addons-539053 dockerd[1335]: time="2024-09-16T17:28:26.596370094Z" level=warning msg="reference for unknown type: " digest="sha256:8825bb13459c64dcf9503d836b94b49c97dc831aff7c325a6eed68961388cf9c" remote="ghcr.io/headlamp-k8s/headlamp@sha256:8825bb13459c64dcf9503d836b94b49c97dc831aff7c325a6eed68961388cf9c"
Sep 16 17:28:29 addons-539053 dockerd[1335]: time="2024-09-16T17:28:29.068655935Z" level=info msg="ignoring event" container=c87b579f39f03e7dd74cfc8e98f21b02379d5cfd0ce59bd821e06fef7ff09593 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:29 addons-539053 dockerd[1335]: time="2024-09-16T17:28:29.071267133Z" level=info msg="ignoring event" container=a70c2f13c30f71ed58c17c69e4854d55498ffcf49d9c747b111cc48547531455 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:29 addons-539053 dockerd[1335]: time="2024-09-16T17:28:29.288638061Z" level=info msg="ignoring event" container=c49ad22424f957b4df1600a3763506700b034d1a7df6d3367b20ec6b19e553c7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:29 addons-539053 dockerd[1335]: time="2024-09-16T17:28:29.295676627Z" level=info msg="ignoring event" container=b1940c3dced086f5673e7a3c6bf5b0497bbceaee3fda51e05be7517f4c947079 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:32 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:32Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/2e229c2504e4309757a439a5e4eda9ae6e9f4ba13849bff39acc1c7c23e7e0ed/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local southamerica-west1-a.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 16 17:28:32 addons-539053 dockerd[1335]: time="2024-09-16T17:28:32.208660805Z" level=info msg="ignoring event" container=9843ad7bd148dabe17e3aef84e11dbe3c3fa57d24e0c80f876959e3b8c41cfcd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:32 addons-539053 dockerd[1335]: time="2024-09-16T17:28:32.276042145Z" level=info msg="ignoring event" container=2a1c852267cc02ee23be14874ce5fb8e623d5957c7bfcd42ba5f297bb4e2c74b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:34 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:34Z" level=info msg="Stop pulling image ghcr.io/headlamp-k8s/headlamp:v0.25.1@sha256:8825bb13459c64dcf9503d836b94b49c97dc831aff7c325a6eed68961388cf9c: Status: Downloaded newer image for ghcr.io/headlamp-k8s/headlamp@sha256:8825bb13459c64dcf9503d836b94b49c97dc831aff7c325a6eed68961388cf9c"
Sep 16 17:28:37 addons-539053 dockerd[1335]: time="2024-09-16T17:28:37.091903555Z" level=info msg="Container failed to exit within 2s of signal 15 - using the force" container=076d4be4b58e4948fa5151c3c46abe6d974fb02a9bc025801258f081bab349f3
Sep 16 17:28:37 addons-539053 dockerd[1335]: time="2024-09-16T17:28:37.147524879Z" level=info msg="ignoring event" container=076d4be4b58e4948fa5151c3c46abe6d974fb02a9bc025801258f081bab349f3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:37 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:37Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"ingress-nginx-controller-bc57996ff-j5nxx_ingress-nginx\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 16 17:28:37 addons-539053 dockerd[1335]: time="2024-09-16T17:28:37.281181532Z" level=info msg="ignoring event" container=7a956fa4d115d515dde94d8f62f48e2a1e8a0c8c6efc7a9896c5b2ae0bfca5b3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:37 addons-539053 cri-dockerd[1599]: time="2024-09-16T17:28:37Z" level=info msg="Stop pulling image docker.io/kicbase/echo-server:1.0: Status: Downloaded newer image for kicbase/echo-server:1.0"
Sep 16 17:28:39 addons-539053 dockerd[1335]: time="2024-09-16T17:28:39.519845873Z" level=info msg="ignoring event" container=05d9b8b1d2b9137352fd9da6e9b7a625db99434d827fe5f557e938c1e70c2e90 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:39 addons-539053 dockerd[1335]: time="2024-09-16T17:28:39.983094618Z" level=info msg="ignoring event" container=dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:40 addons-539053 dockerd[1335]: time="2024-09-16T17:28:40.061304860Z" level=info msg="ignoring event" container=4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:40 addons-539053 dockerd[1335]: time="2024-09-16T17:28:40.183622592Z" level=info msg="ignoring event" container=6ac17af3097bd415c9ad178449ea72399d4d8b6fe7e470e48b30da8e4f2409e6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 16 17:28:40 addons-539053 dockerd[1335]: time="2024-09-16T17:28:40.213768904Z" level=info msg="ignoring event" container=f9faf95fd60475bffcd8bf735ab1d2d30c99e5e22ae3b3c41b84fa0abc109e18 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
ef034ee4d4d32 kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 3 seconds ago Running hello-world-app 0 2e229c2504e43 hello-world-app-55bf9c44b4-t96vs
42c9065ab8bf9 ghcr.io/headlamp-k8s/headlamp@sha256:8825bb13459c64dcf9503d836b94b49c97dc831aff7c325a6eed68961388cf9c 6 seconds ago Running headlamp 0 df3cb3216e7b2 headlamp-7b5c95b59d-fbdl8
a5d4a38ea5d8d nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 16 seconds ago Running nginx 0 a219ddee24cca nginx
1d005c612eb5e gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 4b8b65c562239 gcp-auth-89d5ffd79-g5vjr
ff7e55018fc5c ce263a8653f9c 11 minutes ago Exited patch 1 f00bf5e9346c1 ingress-nginx-admission-patch-rjmgh
3978b6e47c997 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 6ac7fa646745c ingress-nginx-admission-create-c9s92
81f5330bbecca 6e38f40d628db 12 minutes ago Running storage-provisioner 0 6021c44abb111 storage-provisioner
1364b0b4b1295 c69fa2e9cbf5f 12 minutes ago Running coredns 0 336f91b8e8668 coredns-7c65d6cfc9-wnhjq
464a6082c92d2 60c005f310ff3 12 minutes ago Running kube-proxy 0 df8f8d4ee5ab8 kube-proxy-bbn89
4bb47611cde2c 2e96e5913fc06 12 minutes ago Running etcd 0 1d45d71be388f etcd-addons-539053
71598060e1539 9aa1fad941575 12 minutes ago Running kube-scheduler 0 a517be7aece40 kube-scheduler-addons-539053
adee2fab665b0 6bab7719df100 12 minutes ago Running kube-apiserver 0 4fd4c56b95406 kube-apiserver-addons-539053
ddf08198399f9 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 74b7a82ac60d5 kube-controller-manager-addons-539053
==> coredns [1364b0b4b129] <==
[INFO] 10.244.0.22:57420 - 45886 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.007515328s
[INFO] 10.244.0.22:38041 - 34930 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003137688s
[INFO] 10.244.0.22:44061 - 46554 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.007263598s
[INFO] 10.244.0.22:47473 - 48555 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.007810311s
[INFO] 10.244.0.22:49691 - 43923 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.00766984s
[INFO] 10.244.0.22:57420 - 62481 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005043512s
[INFO] 10.244.0.22:58098 - 18081 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.007797378s
[INFO] 10.244.0.22:35638 - 26250 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.007750623s
[INFO] 10.244.0.22:40013 - 5118 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005231309s
[INFO] 10.244.0.22:40013 - 54757 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.055780546s
[INFO] 10.244.0.22:57420 - 54355 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.056908987s
[INFO] 10.244.0.22:40013 - 16750 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000075086s
[INFO] 10.244.0.22:57420 - 9350 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000081444s
[INFO] 10.244.0.22:49691 - 14661 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003192004s
[INFO] 10.244.0.22:44061 - 50318 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005302067s
[INFO] 10.244.0.22:35638 - 22487 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005333814s
[INFO] 10.244.0.22:38041 - 5951 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.060976887s
[INFO] 10.244.0.22:47473 - 65406 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.00410309s
[INFO] 10.244.0.22:38041 - 215 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000059408s
[INFO] 10.244.0.22:35638 - 57010 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000075901s
[INFO] 10.244.0.22:44061 - 47572 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000102401s
[INFO] 10.244.0.22:49691 - 50644 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000088036s
[INFO] 10.244.0.22:58098 - 43549 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003215002s
[INFO] 10.244.0.22:47473 - 9005 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000078792s
[INFO] 10.244.0.22:58098 - 921 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000065791s
==> describe nodes <==
Name: addons-539053
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-539053
kubernetes.io/os=linux
minikube.k8s.io/commit=91d692c919753635ac118b7ed7ae5503b67c63c8
minikube.k8s.io/name=addons-539053
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_16T17_15_54_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-539053
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 16 Sep 2024 17:15:50 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-539053
AcquireTime: <unset>
RenewTime: Mon, 16 Sep 2024 17:28:37 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
----             ------  -----------------                 ------------------                ------                      -------
MemoryPressure   False   Mon, 16 Sep 2024 17:27:58 +0000   Mon, 16 Sep 2024 17:15:49 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
DiskPressure     False   Mon, 16 Sep 2024 17:27:58 +0000   Mon, 16 Sep 2024 17:15:49 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
PIDPressure      False   Mon, 16 Sep 2024 17:27:58 +0000   Mon, 16 Sep 2024 17:15:49 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
Ready            True    Mon, 16 Sep 2024 17:27:58 +0000   Mon, 16 Sep 2024 17:15:51 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-539053
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859320Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859320Ki
pods: 110
System Info:
Machine ID: f1bbe9e4b66c429aa7dafe493aa619a6
System UUID: 064c562e-413e-4af6-ba4a-4df004b28d4d
Boot ID: 606f120e-2bee-42b2-a3a5-24f53b1f28a3
Kernel Version: 5.15.0-1068-gcp
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.2.1
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (12 in total)
Namespace    Name                                    CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------    ----                                    ------------  ----------  ---------------  -------------  ---
default      busybox                                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         9m13s
default      hello-world-app-55bf9c44b4-t96vs        0 (0%)        0 (0%)      0 (0%)           0 (0%)         9s
default      nginx                                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         20s
gcp-auth     gcp-auth-89d5ffd79-g5vjr                0 (0%)        0 (0%)      0 (0%)           0 (0%)         11m
headlamp     headlamp-7b5c95b59d-fbdl8               0 (0%)        0 (0%)      0 (0%)           0 (0%)         15s
kube-system  coredns-7c65d6cfc9-wnhjq                100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     12m
kube-system  etcd-addons-539053                      100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         12m
kube-system  kube-apiserver-addons-539053            250m (3%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-controller-manager-addons-539053   200m (2%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-proxy-bbn89                        0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
kube-system  kube-scheduler-addons-539053            100m (1%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system  storage-provisioner                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                750m (9%)   0 (0%)
memory             170Mi (0%)  170Mi (0%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type     Reason                   Age  From             Message
----     ------                   ---- ----             -------
Normal   Starting                 12m  kube-proxy
Normal   Starting                 12m  kubelet          Starting kubelet.
Warning  CgroupV1                 12m  kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal   NodeAllocatableEnforced  12m  kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory  12m  kubelet          Node addons-539053 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    12m  kubelet          Node addons-539053 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     12m  kubelet          Node addons-539053 status is now: NodeHasSufficientPID
Normal   RegisteredNode           12m  node-controller  Node addons-539053 event: Registered Node addons-539053 in Controller
==> dmesg <==
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 7e e5 48 0a fd 02 08 06
[ +8.598564] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 06 8e ad 9e 67 2a 08 06
[ +1.081096] IPv4: martian source 10.244.0.1 from 10.244.0.23, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 4e 31 79 46 37 3b 08 06
[ +1.333669] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000017] ll header: 00000000: ff ff ff ff ff ff ba f3 f0 ed d9 71 08 06
[ +0.323712] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 26 19 09 ed 61 6c 08 06
[ +0.055042] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff e2 ed dc cd fa 06 08 06
[Sep16 17:18] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 6e c5 fc d3 80 02 08 06
[ +0.096493] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 12 0f 00 b6 c1 f4 08 06
[ +26.268961] IPv4: martian source 10.244.0.1 from 10.244.0.26, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 92 c5 a8 10 d2 85 08 06
[ +0.000580] IPv4: martian source 10.244.0.26 from 10.244.0.3, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 92 61 9e bb 7c 6d 08 06
[Sep16 17:27] IPv4: martian source 10.244.0.1 from 10.244.0.32, on dev eth0
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 62 03 d4 4e 61 be 08 06
[Sep16 17:28] IPv4: martian source 10.244.0.36 from 10.244.0.22, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 06 8e ad 9e 67 2a 08 06
[ +1.949689] IPv4: martian source 10.244.0.22 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 92 61 9e bb 7c 6d 08 06
==> etcd [4bb47611cde2] <==
{"level":"info","ts":"2024-09-16T17:15:49.047337Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2024-09-16T17:15:49.047398Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-09-16T17:15:49.047435Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-09-16T17:15:49.047464Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-09-16T17:15:49.047477Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-16T17:15:49.047489Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-16T17:15:49.047502Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-16T17:15:49.048501Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-539053 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-16T17:15:49.048506Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-16T17:15:49.048530Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-16T17:15:49.048543Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-16T17:15:49.048893Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-16T17:15:49.048934Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-16T17:15:49.049709Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-16T17:15:49.049791Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-16T17:15:49.049817Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-16T17:15:49.049789Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-16T17:15:49.049851Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-16T17:15:49.050664Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-16T17:15:49.051530Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"warn","ts":"2024-09-16T17:17:09.601392Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"110.36106ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" ","response":"range_response_count:1 size:1113"}
{"level":"info","ts":"2024-09-16T17:17:09.601492Z","caller":"traceutil/trace.go:171","msg":"trace[1523185871] range","detail":"{range_begin:/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath; range_end:; response_count:1; response_revision:1150; }","duration":"110.473203ms","start":"2024-09-16T17:17:09.491000Z","end":"2024-09-16T17:17:09.601474Z","steps":["trace[1523185871] 'range keys from in-memory index tree' (duration: 110.241782ms)"],"step_count":1}
{"level":"info","ts":"2024-09-16T17:25:49.176285Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1867}
{"level":"info","ts":"2024-09-16T17:25:49.202756Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1867,"took":"25.896596ms","hash":2187363218,"current-db-size-bytes":8957952,"current-db-size":"9.0 MB","current-db-size-in-use-bytes":4980736,"current-db-size-in-use":"5.0 MB"}
{"level":"info","ts":"2024-09-16T17:25:49.202798Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":2187363218,"revision":1867,"compact-revision":-1}
==> gcp-auth [1d005c612eb5] <==
2024/09/16 17:19:27 Ready to write response ...
2024/09/16 17:27:29 Ready to marshal response ...
2024/09/16 17:27:29 Ready to write response ...
2024/09/16 17:27:29 Ready to marshal response ...
2024/09/16 17:27:29 Ready to write response ...
2024/09/16 17:27:39 Ready to marshal response ...
2024/09/16 17:27:39 Ready to write response ...
2024/09/16 17:27:39 Ready to marshal response ...
2024/09/16 17:27:39 Ready to write response ...
2024/09/16 17:27:42 Ready to marshal response ...
2024/09/16 17:27:42 Ready to write response ...
2024/09/16 17:27:42 Ready to marshal response ...
2024/09/16 17:27:42 Ready to write response ...
2024/09/16 17:28:13 Ready to marshal response ...
2024/09/16 17:28:13 Ready to write response ...
2024/09/16 17:28:19 Ready to marshal response ...
2024/09/16 17:28:19 Ready to write response ...
2024/09/16 17:28:25 Ready to marshal response ...
2024/09/16 17:28:25 Ready to write response ...
2024/09/16 17:28:25 Ready to marshal response ...
2024/09/16 17:28:25 Ready to write response ...
2024/09/16 17:28:25 Ready to marshal response ...
2024/09/16 17:28:25 Ready to write response ...
2024/09/16 17:28:31 Ready to marshal response ...
2024/09/16 17:28:31 Ready to write response ...
==> kernel <==
17:28:41 up 1:11, 0 users, load average: 0.76, 0.43, 0.54
Linux addons-539053 5.15.0-1068-gcp #76~20.04.1-Ubuntu SMP Tue Aug 20 15:52:45 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kube-apiserver [adee2fab665b] <==
W0916 17:19:19.251510 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0916 17:19:19.351255 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0916 17:19:19.669676 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0916 17:27:49.597327 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
I0916 17:27:51.723728 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
E0916 17:27:58.033561 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
I0916 17:28:14.407767 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0916 17:28:15.423270 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0916 17:28:19.861018 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0916 17:28:20.053742 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.110.93.193"}
I0916 17:28:25.652699 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.110.205.176"}
I0916 17:28:28.764908 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0916 17:28:28.764977 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0916 17:28:28.777502 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0916 17:28:28.777554 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0916 17:28:28.778613 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0916 17:28:28.778652 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0916 17:28:28.861947 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0916 17:28:28.861997 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0916 17:28:28.866605 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0916 17:28:28.866641 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0916 17:28:29.779344 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0916 17:28:29.866668 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0916 17:28:29.960520 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0916 17:28:31.532682 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.103.40.186"}
==> kube-controller-manager [ddf08198399f] <==
W0916 17:28:33.984028 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:33.984074 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0916 17:28:34.058954 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:34.059005 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0916 17:28:34.065659 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create" delay="0s"
I0916 17:28:34.067293 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-bc57996ff" duration="7.327µs"
I0916 17:28:34.070938 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch" delay="0s"
W0916 17:28:34.446339 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:34.446384 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0916 17:28:35.207677 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="headlamp/headlamp-7b5c95b59d" duration="86.966µs"
I0916 17:28:35.225080 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="headlamp/headlamp-7b5c95b59d" duration="6.297113ms"
I0916 17:28:35.225148 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="headlamp/headlamp-7b5c95b59d" duration="37.321µs"
W0916 17:28:35.437541 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:35.437583 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0916 17:28:36.314711 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:36.314756 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0916 17:28:38.037090 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:38.037131 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0916 17:28:38.311213 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="6.108163ms"
I0916 17:28:38.311337 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="73.395µs"
W0916 17:28:38.527605 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:38.527645 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0916 17:28:39.948014 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="6.666µs"
W0916 17:28:39.978990 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0916 17:28:39.979030 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
==> kube-proxy [464a6082c92d] <==
I0916 17:16:01.252716 1 server_linux.go:66] "Using iptables proxy"
I0916 17:16:01.762647 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0916 17:16:01.762715 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0916 17:16:02.262215 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0916 17:16:02.262283 1 server_linux.go:169] "Using iptables Proxier"
I0916 17:16:02.267440 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0916 17:16:02.267789 1 server.go:483] "Version info" version="v1.31.1"
I0916 17:16:02.267810 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0916 17:16:02.348480 1 config.go:199] "Starting service config controller"
I0916 17:16:02.348530 1 shared_informer.go:313] Waiting for caches to sync for service config
I0916 17:16:02.348639 1 config.go:105] "Starting endpoint slice config controller"
I0916 17:16:02.348646 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0916 17:16:02.348818 1 config.go:328] "Starting node config controller"
I0916 17:16:02.348827 1 shared_informer.go:313] Waiting for caches to sync for node config
I0916 17:16:02.454376 1 shared_informer.go:320] Caches are synced for node config
I0916 17:16:02.454423 1 shared_informer.go:320] Caches are synced for service config
I0916 17:16:02.454448 1 shared_informer.go:320] Caches are synced for endpoint slice config
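Note on the block above: kube-proxy comes up cleanly here; the only warning is the unset nodePortAddresses hint at startup. If that matters for a given setup, the effective configuration can be inspected (and nodePortAddresses set, for example to "primary" as the message suggests) through the kubeadm-managed kube-proxy ConfigMap; a hedged check using this log's context name:
kubectl --context addons-539053 -n kube-system get configmap kube-proxy -o yaml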
==> kube-scheduler [71598060e153] <==
E0916 17:15:50.960483 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0916 17:15:50.960008 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0916 17:15:50.960493 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
E0916 17:15:50.960528 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0916 17:15:50.959849 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0916 17:15:50.960581 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0916 17:15:50.960624 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0916 17:15:50.960640 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0916 17:15:51.783739 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0916 17:15:51.783781 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0916 17:15:51.813061 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0916 17:15:51.813107 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0916 17:15:51.892590 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0916 17:15:51.892636 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0916 17:15:51.963473 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0916 17:15:51.963509 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0916 17:15:52.015875 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0916 17:15:52.015922 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0916 17:15:52.015880 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0916 17:15:52.015967 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0916 17:15:52.023098 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0916 17:15:52.023140 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0916 17:15:52.068599 1 reflector.go:561] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0916 17:15:52.068641 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
I0916 17:15:54.058149 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
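Note on the block above: the "is forbidden: User \"system:kube-scheduler\" cannot list ..." entries all fall in the first seconds after startup, before the final "Caches are synced" line, which is the usual window in which the bootstrap RBAC policy is still being created; they are not expected to recur afterwards. If they did persist, a hedged way to check the scheduler's permissions with this log's context name would be:
kubectl --context addons-539053 auth can-i list poddisruptionbudgets.policy --as=system:kube-scheduler
kubectl --context addons-539053 get clusterrolebinding system:kube-scheduler -o yaml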
==> kubelet <==
Sep 16 17:28:37 addons-539053 kubelet[2444]: I0916 17:28:37.575222 2444 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-27pt7\" (UniqueName: \"kubernetes.io/projected/7d00cfb5-8716-43fc-a702-f7bfee1398e8-kube-api-access-27pt7\") on node \"addons-539053\" DevicePath \"\""
Sep 16 17:28:38 addons-539053 kubelet[2444]: I0916 17:28:38.300733 2444 scope.go:117] "RemoveContainer" containerID="076d4be4b58e4948fa5151c3c46abe6d974fb02a9bc025801258f081bab349f3"
Sep 16 17:28:38 addons-539053 kubelet[2444]: I0916 17:28:38.304827 2444 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/hello-world-app-55bf9c44b4-t96vs" podStartSLOduration=2.032126352 podStartE2EDuration="7.304806731s" podCreationTimestamp="2024-09-16 17:28:31 +0000 UTC" firstStartedPulling="2024-09-16 17:28:32.17695026 +0000 UTC m=+758.944708611" lastFinishedPulling="2024-09-16 17:28:37.449630646 +0000 UTC m=+764.217388990" observedRunningTime="2024-09-16 17:28:38.304748894 +0000 UTC m=+765.072507254" watchObservedRunningTime="2024-09-16 17:28:38.304806731 +0000 UTC m=+765.072565094"
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.311810 2444 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d00cfb5-8716-43fc-a702-f7bfee1398e8" path="/var/lib/kubelet/pods/7d00cfb5-8716-43fc-a702-f7bfee1398e8/volumes"
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.686307 2444 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/652bfafb-557c-47cb-954a-a64b55d522e1-gcp-creds\") pod \"652bfafb-557c-47cb-954a-a64b55d522e1\" (UID: \"652bfafb-557c-47cb-954a-a64b55d522e1\") "
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.686372 2444 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pxcl\" (UniqueName: \"kubernetes.io/projected/652bfafb-557c-47cb-954a-a64b55d522e1-kube-api-access-7pxcl\") pod \"652bfafb-557c-47cb-954a-a64b55d522e1\" (UID: \"652bfafb-557c-47cb-954a-a64b55d522e1\") "
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.686419 2444 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/652bfafb-557c-47cb-954a-a64b55d522e1-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "652bfafb-557c-47cb-954a-a64b55d522e1" (UID: "652bfafb-557c-47cb-954a-a64b55d522e1"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.688205 2444 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/652bfafb-557c-47cb-954a-a64b55d522e1-kube-api-access-7pxcl" (OuterVolumeSpecName: "kube-api-access-7pxcl") pod "652bfafb-557c-47cb-954a-a64b55d522e1" (UID: "652bfafb-557c-47cb-954a-a64b55d522e1"). InnerVolumeSpecName "kube-api-access-7pxcl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.787362 2444 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/652bfafb-557c-47cb-954a-a64b55d522e1-gcp-creds\") on node \"addons-539053\" DevicePath \"\""
Sep 16 17:28:39 addons-539053 kubelet[2444]: I0916 17:28:39.787399 2444 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-7pxcl\" (UniqueName: \"kubernetes.io/projected/652bfafb-557c-47cb-954a-a64b55d522e1-kube-api-access-7pxcl\") on node \"addons-539053\" DevicePath \"\""
Sep 16 17:28:40 addons-539053 kubelet[2444]: E0916 17:28:40.306282 2444 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="b96b00eb-2795-4229-96ae-80796f8fb299"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.331499 2444 scope.go:117] "RemoveContainer" containerID="dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.347552 2444 scope.go:117] "RemoveContainer" containerID="dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba"
Sep 16 17:28:40 addons-539053 kubelet[2444]: E0916 17:28:40.349671 2444 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba" containerID="dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.349716 2444 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba"} err="failed to get container status \"dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba\": rpc error: code = Unknown desc = Error response from daemon: No such container: dfd31e12ab9bfbc878163c2a80b52317b5ad35e716370d52f09f91e8aacd5aba"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.349752 2444 scope.go:117] "RemoveContainer" containerID="4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.365440 2444 scope.go:117] "RemoveContainer" containerID="4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f"
Sep 16 17:28:40 addons-539053 kubelet[2444]: E0916 17:28:40.366143 2444 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f" containerID="4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.366191 2444 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f"} err="failed to get container status \"4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 4c73f7e2709ed5d4d891821b20b365174723c0b3873e814415e40e2ddc6f9e2f"
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.391498 2444 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs54p\" (UniqueName: \"kubernetes.io/projected/556af332-2257-4db0-adcb-aca469cf992d-kube-api-access-rs54p\") pod \"556af332-2257-4db0-adcb-aca469cf992d\" (UID: \"556af332-2257-4db0-adcb-aca469cf992d\") "
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.391560 2444 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjrql\" (UniqueName: \"kubernetes.io/projected/4849ea19-88f6-4fbc-ba0f-e290ee2d0d80-kube-api-access-pjrql\") pod \"4849ea19-88f6-4fbc-ba0f-e290ee2d0d80\" (UID: \"4849ea19-88f6-4fbc-ba0f-e290ee2d0d80\") "
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.393425 2444 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/556af332-2257-4db0-adcb-aca469cf992d-kube-api-access-rs54p" (OuterVolumeSpecName: "kube-api-access-rs54p") pod "556af332-2257-4db0-adcb-aca469cf992d" (UID: "556af332-2257-4db0-adcb-aca469cf992d"). InnerVolumeSpecName "kube-api-access-rs54p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.393585 2444 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4849ea19-88f6-4fbc-ba0f-e290ee2d0d80-kube-api-access-pjrql" (OuterVolumeSpecName: "kube-api-access-pjrql") pod "4849ea19-88f6-4fbc-ba0f-e290ee2d0d80" (UID: "4849ea19-88f6-4fbc-ba0f-e290ee2d0d80"). InnerVolumeSpecName "kube-api-access-pjrql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.491824 2444 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-rs54p\" (UniqueName: \"kubernetes.io/projected/556af332-2257-4db0-adcb-aca469cf992d-kube-api-access-rs54p\") on node \"addons-539053\" DevicePath \"\""
Sep 16 17:28:40 addons-539053 kubelet[2444]: I0916 17:28:40.491855 2444 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-pjrql\" (UniqueName: \"kubernetes.io/projected/4849ea19-88f6-4fbc-ba0f-e290ee2d0d80-kube-api-access-pjrql\") on node \"addons-539053\" DevicePath \"\""
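Note on the block above: the "No such container" errors come from the kubelet asking the runtime about containers (dfd31e12..., 4c73f7e2...) immediately after it has already removed them during pod teardown, so they read as cleanup noise rather than the failure itself. To double-check that nothing was left behind on the node, one hedged option, reusing the binary path and profile name from this run, is:
out/minikube-linux-amd64 -p addons-539053 ssh -- docker ps -a --filter status=exited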
==> storage-provisioner [81f5330bbecc] <==
I0916 17:16:06.869134 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0916 17:16:06.953264 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0916 17:16:06.953322 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0916 17:16:06.966188 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0916 17:16:06.966400 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-539053_e435f71f-8eeb-43e9-b83f-f401240355a1!
I0916 17:16:06.967455 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"9a3863be-6285-47cc-9c29-e5f5df8524de", APIVersion:"v1", ResourceVersion:"617", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-539053_e435f71f-8eeb-43e9-b83f-f401240355a1 became leader
I0916 17:16:07.067858 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-539053_e435f71f-8eeb-43e9-b83f-f401240355a1!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-539053 -n addons-539053
helpers_test.go:261: (dbg) Run: kubectl --context addons-539053 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-539053 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-539053 describe pod busybox:
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Service Account: default
Node: addons-539053/192.168.49.2
Start Time: Mon, 16 Sep 2024 17:19:27 +0000
Labels: integration-test=busybox
Annotations: <none>
Status: Pending
IP: 10.244.0.28
IPs:
IP: 10.244.0.28
Containers:
busybox:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
Image ID:
Port: <none>
Host Port: <none>
Command:
sleep
3600
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment:
GOOGLE_APPLICATION_CREDENTIALS: /google-app-creds.json
PROJECT_ID: this_is_fake
GCP_PROJECT: this_is_fake
GCLOUD_PROJECT: this_is_fake
GOOGLE_CLOUD_PROJECT: this_is_fake
CLOUDSDK_CORE_PROJECT: this_is_fake
Mounts:
/google-app-creds.json from gcp-creds (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-778rl (ro)
Conditions:
Type                        Status
PodReadyToStartContainers   True
Initialized                 True
Ready                       False
ContainersReady             False
PodScheduled                True
Volumes:
kube-api-access-778rl:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
gcp-creds:
Type: HostPath (bare host directory volume)
Path: /var/lib/minikube/google_application_credentials.json
HostPathType: File
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type     Reason     Age                     From               Message
----     ------     ----                    ----               -------
Normal   Scheduled  9m14s                   default-scheduler  Successfully assigned default/busybox to addons-539053
Warning  Failed     7m52s (x6 over 9m13s)   kubelet            Error: ImagePullBackOff
Normal   Pulling    7m39s (x4 over 9m14s)   kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
Warning  Failed     7m39s (x4 over 9m14s)   kubelet            Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
Warning  Failed     7m39s (x4 over 9m14s)   kubelet            Error: ErrImagePull
Normal   BackOff    4m11s (x21 over 9m13s)  kubelet            Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
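Note on the describe output above: the only non-running pod the post-mortem finds is the busybox pod, which has been stuck since its 17:19 start time pulling gcr.io/k8s-minikube/busybox:1.28.4-glibc with "unauthorized: authentication failed", well before this test's teardown at 17:28. That pull error can be reproduced off-cluster to separate a registry/auth problem from a cluster problem; a minimal sketch, assuming Docker is available on the host running the tests:
docker pull gcr.io/k8s-minikube/busybox:1.28.4-glibc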
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (72.43s)