=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:332: registry stabilized in 2.964117ms
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-mdbsb" [6646693e-e468-4f8c-a209-9f028e31da67] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.002723714s
addons_test.go:337: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-fjxbz" [6340cd55-7e16-4315-8b01-5e879a2b0d76] Running
addons_test.go:337: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.002740614s
addons_test.go:342: (dbg) Run: kubectl --context addons-207808 delete po -l run=registry-test --now
addons_test.go:347: (dbg) Run: kubectl --context addons-207808 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:347: (dbg) Non-zero exit: kubectl --context addons-207808 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.077011015s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:349: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-207808 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:353: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:361: (dbg) Run: out/minikube-linux-amd64 -p addons-207808 ip
2024/09/12 21:42:50 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:390: (dbg) Run: out/minikube-linux-amd64 -p addons-207808 addons disable registry --alsologtostderr -v=1
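For manual debugging, the probe that timed out at addons_test.go:347 can be re-run by hand against the same profile; this is a minimal sketch assembled only from commands and addresses already logged in this run (profile addons-207808, node IP 192.168.49.2 as reported above), not part of the captured test output:

    # re-run the exact in-cluster probe the test attempted
    kubectl --context addons-207808 run --rm registry-test --restart=Never \
      --image=gcr.io/k8s-minikube/busybox -it -- \
      sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
    # the registry is also published on the node IP/port that the 21:42:50 DEBUG GET hit
    curl -sS -D - -o /dev/null http://192.168.49.2:5000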
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-207808
helpers_test.go:235: (dbg) docker inspect addons-207808:
-- stdout --
[
{
"Id": "46d5993b8529191d590b4bd4995f87689a3729efa0f26a999bb5e8711add198d",
"Created": "2024-09-12T21:29:47.830700097Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 14638,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-12T21:29:47.964008126Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:1e046fff9d873d0625e7bcc757c3514a16d475711d13430b9690fa498decc3a8",
"ResolvConfPath": "/var/lib/docker/containers/46d5993b8529191d590b4bd4995f87689a3729efa0f26a999bb5e8711add198d/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/46d5993b8529191d590b4bd4995f87689a3729efa0f26a999bb5e8711add198d/hostname",
"HostsPath": "/var/lib/docker/containers/46d5993b8529191d590b4bd4995f87689a3729efa0f26a999bb5e8711add198d/hosts",
"LogPath": "/var/lib/docker/containers/46d5993b8529191d590b4bd4995f87689a3729efa0f26a999bb5e8711add198d/46d5993b8529191d590b4bd4995f87689a3729efa0f26a999bb5e8711add198d-json.log",
"Name": "/addons-207808",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-207808:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-207808",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/bebd4167dc6ae140c935d93fbe82abd9f6c2bfed6d6220fa9d55349c6b1adb29-init/diff:/var/lib/docker/overlay2/a3952d20b945774e14a25a8bf698b00862be22019b42328b7689b583b03e6963/diff",
"MergedDir": "/var/lib/docker/overlay2/bebd4167dc6ae140c935d93fbe82abd9f6c2bfed6d6220fa9d55349c6b1adb29/merged",
"UpperDir": "/var/lib/docker/overlay2/bebd4167dc6ae140c935d93fbe82abd9f6c2bfed6d6220fa9d55349c6b1adb29/diff",
"WorkDir": "/var/lib/docker/overlay2/bebd4167dc6ae140c935d93fbe82abd9f6c2bfed6d6220fa9d55349c6b1adb29/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-207808",
"Source": "/var/lib/docker/volumes/addons-207808/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-207808",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-207808",
"name.minikube.sigs.k8s.io": "addons-207808",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "35eee3aee247cc6527c8c38a2e2b7b31a1f5ae32ee73cd312dc8e8bce4c2d597",
"SandboxKey": "/var/run/docker/netns/35eee3aee247",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-207808": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "3b892a7412044c1e7d75238359e45f33a1841401b47fb996d4dffacf20c04e0d",
"EndpointID": "aa2e652f07156f27cf455f58bcf8f6618dbcc4f2b6ddfbf833de1512c5c596bc",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-207808",
"46d5993b8529"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-207808 -n addons-207808
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-207808 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | -p download-docker-093887 | download-docker-093887 | jenkins | v1.34.0 | 12 Sep 24 21:29 UTC | 12 Sep 24 21:29 UTC |
| start | --download-only -p | binary-mirror-374984 | jenkins | v1.34.0 | 12 Sep 24 21:29 UTC | |
| | binary-mirror-374984 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:41283 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-374984 | binary-mirror-374984 | jenkins | v1.34.0 | 12 Sep 24 21:29 UTC | 12 Sep 24 21:29 UTC |
| addons | enable dashboard -p | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:29 UTC | |
| | addons-207808 | | | | | |
| addons | disable dashboard -p | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:29 UTC | |
| | addons-207808 | | | | | |
| start | -p addons-207808 --wait=true | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:29 UTC | 12 Sep 24 21:32 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:33 UTC | 12 Sep 24 21:33 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:41 UTC | 12 Sep 24 21:41 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:41 UTC | 12 Sep 24 21:41 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:41 UTC | 12 Sep 24 21:41 UTC |
| | -p addons-207808 | | | | | |
| ssh | addons-207808 ssh cat | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | /opt/local-path-provisioner/pvc-b1ba2409-c488-4cdf-b0b8-4d252d606c73_default_test-pvc/file1 | | | | | |
| addons | disable cloud-spanner -p | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | addons-207808 | | | | | |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | -p addons-207808 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-207808 addons | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-207808 addons | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | addons-207808 | | | | | |
| addons | addons-207808 addons | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ssh | addons-207808 ssh curl -s | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-207808 ip | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| ip | addons-207808 ip | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| addons | addons-207808 addons disable | addons-207808 | jenkins | v1.34.0 | 12 Sep 24 21:42 UTC | 12 Sep 24 21:42 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/12 21:29:26
Running on machine: ubuntu-20-agent
Binary: Built with gc go1.22.5 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0912 21:29:26.510487 13904 out.go:345] Setting OutFile to fd 1 ...
I0912 21:29:26.510713 13904 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0912 21:29:26.510720 13904 out.go:358] Setting ErrFile to fd 2...
I0912 21:29:26.510725 13904 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0912 21:29:26.510891 13904 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19616-5723/.minikube/bin
I0912 21:29:26.511484 13904 out.go:352] Setting JSON to false
I0912 21:29:26.512268 13904 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent","uptime":709,"bootTime":1726175857,"procs":172,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1067-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0912 21:29:26.512325 13904 start.go:139] virtualization: kvm guest
I0912 21:29:26.514462 13904 out.go:177] * [addons-207808] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0912 21:29:26.515855 13904 notify.go:220] Checking for updates...
I0912 21:29:26.515874 13904 out.go:177] - MINIKUBE_LOCATION=19616
I0912 21:29:26.517167 13904 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0912 21:29:26.518513 13904 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19616-5723/kubeconfig
I0912 21:29:26.519810 13904 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19616-5723/.minikube
I0912 21:29:26.521203 13904 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0912 21:29:26.522515 13904 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0912 21:29:26.523896 13904 driver.go:394] Setting default libvirt URI to qemu:///system
I0912 21:29:26.545225 13904 docker.go:123] docker version: linux-27.2.1:Docker Engine - Community
I0912 21:29:26.545330 13904 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0912 21:29:26.593830 13904 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:26 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-12 21:29:26.58497424 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0912 21:29:26.593946 13904 docker.go:318] overlay module found
I0912 21:29:26.595723 13904 out.go:177] * Using the docker driver based on user configuration
I0912 21:29:26.597020 13904 start.go:297] selected driver: docker
I0912 21:29:26.597041 13904 start.go:901] validating driver "docker" against <nil>
I0912 21:29:26.597054 13904 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0912 21:29:26.597805 13904 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0912 21:29:26.642540 13904 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:26 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-12 21:29:26.633767514 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1067-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0912 21:29:26.642737 13904 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0912 21:29:26.642942 13904 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0912 21:29:26.644840 13904 out.go:177] * Using Docker driver with root privileges
I0912 21:29:26.646178 13904 cni.go:84] Creating CNI manager for ""
I0912 21:29:26.646198 13904 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0912 21:29:26.646208 13904 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0912 21:29:26.646267 13904 start.go:340] cluster config:
{Name:addons-207808 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-207808 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0912 21:29:26.647568 13904 out.go:177] * Starting "addons-207808" primary control-plane node in "addons-207808" cluster
I0912 21:29:26.648798 13904 cache.go:121] Beginning downloading kic base image for docker with docker
I0912 21:29:26.650134 13904 out.go:177] * Pulling base image v0.0.45-1726156396-19616 ...
I0912 21:29:26.651478 13904 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0912 21:29:26.651500 13904 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 in local docker daemon
I0912 21:29:26.651509 13904 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19616-5723/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0912 21:29:26.651521 13904 cache.go:56] Caching tarball of preloaded images
I0912 21:29:26.651629 13904 preload.go:172] Found /home/jenkins/minikube-integration/19616-5723/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0912 21:29:26.651647 13904 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0912 21:29:26.651967 13904 profile.go:143] Saving config to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/config.json ...
I0912 21:29:26.651990 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/config.json: {Name:mk8481da6b54576dc871eb043aa2d3b29d139204 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:26.667151 13904 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 to local cache
I0912 21:29:26.667254 13904 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 in local cache directory
I0912 21:29:26.667269 13904 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 in local cache directory, skipping pull
I0912 21:29:26.667273 13904 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 exists in cache, skipping pull
I0912 21:29:26.667281 13904 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 as a tarball
I0912 21:29:26.667288 13904 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 from local cache
I0912 21:29:38.593119 13904 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 from cached tarball
I0912 21:29:38.593164 13904 cache.go:194] Successfully downloaded all kic artifacts
I0912 21:29:38.593202 13904 start.go:360] acquireMachinesLock for addons-207808: {Name:mk9e28e5e398d3a60a4034b6150283157ca43597 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0912 21:29:38.593295 13904 start.go:364] duration metric: took 74.623µs to acquireMachinesLock for "addons-207808"
I0912 21:29:38.593319 13904 start.go:93] Provisioning new machine with config: &{Name:addons-207808 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-207808 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0912 21:29:38.593403 13904 start.go:125] createHost starting for "" (driver="docker")
I0912 21:29:38.595297 13904 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0912 21:29:38.595532 13904 start.go:159] libmachine.API.Create for "addons-207808" (driver="docker")
I0912 21:29:38.595561 13904 client.go:168] LocalClient.Create starting
I0912 21:29:38.595647 13904 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca.pem
I0912 21:29:38.763880 13904 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/cert.pem
I0912 21:29:38.831665 13904 cli_runner.go:164] Run: docker network inspect addons-207808 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0912 21:29:38.846747 13904 cli_runner.go:211] docker network inspect addons-207808 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0912 21:29:38.846829 13904 network_create.go:284] running [docker network inspect addons-207808] to gather additional debugging logs...
I0912 21:29:38.846853 13904 cli_runner.go:164] Run: docker network inspect addons-207808
W0912 21:29:38.861333 13904 cli_runner.go:211] docker network inspect addons-207808 returned with exit code 1
I0912 21:29:38.861366 13904 network_create.go:287] error running [docker network inspect addons-207808]: docker network inspect addons-207808: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-207808 not found
I0912 21:29:38.861391 13904 network_create.go:289] output of [docker network inspect addons-207808]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-207808 not found
** /stderr **
I0912 21:29:38.861543 13904 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0912 21:29:38.876787 13904 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc0018c9d90}
I0912 21:29:38.876828 13904 network_create.go:124] attempt to create docker network addons-207808 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0912 21:29:38.876879 13904 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-207808 addons-207808
I0912 21:29:38.933636 13904 network_create.go:108] docker network addons-207808 192.168.49.0/24 created
I0912 21:29:38.933669 13904 kic.go:121] calculated static IP "192.168.49.2" for the "addons-207808" container
I0912 21:29:38.933732 13904 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0912 21:29:38.949074 13904 cli_runner.go:164] Run: docker volume create addons-207808 --label name.minikube.sigs.k8s.io=addons-207808 --label created_by.minikube.sigs.k8s.io=true
I0912 21:29:38.965269 13904 oci.go:103] Successfully created a docker volume addons-207808
I0912 21:29:38.965365 13904 cli_runner.go:164] Run: docker run --rm --name addons-207808-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-207808 --entrypoint /usr/bin/test -v addons-207808:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 -d /var/lib
I0912 21:29:43.776870 13904 cli_runner.go:217] Completed: docker run --rm --name addons-207808-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-207808 --entrypoint /usr/bin/test -v addons-207808:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 -d /var/lib: (4.81145982s)
I0912 21:29:43.776894 13904 oci.go:107] Successfully prepared a docker volume addons-207808
I0912 21:29:43.776909 13904 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0912 21:29:43.776926 13904 kic.go:194] Starting extracting preloaded images to volume ...
I0912 21:29:43.776981 13904 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19616-5723/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-207808:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 -I lz4 -xf /preloaded.tar -C /extractDir
I0912 21:29:47.766700 13904 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19616-5723/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-207808:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 -I lz4 -xf /preloaded.tar -C /extractDir: (3.989677319s)
I0912 21:29:47.766733 13904 kic.go:203] duration metric: took 3.989803597s to extract preloaded images to volume ...
W0912 21:29:47.766871 13904 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0912 21:29:47.766986 13904 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0912 21:29:47.815776 13904 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-207808 --name addons-207808 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-207808 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-207808 --network addons-207808 --ip 192.168.49.2 --volume addons-207808:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889
I0912 21:29:48.121715 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Running}}
I0912 21:29:48.140013 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:29:48.158669 13904 cli_runner.go:164] Run: docker exec addons-207808 stat /var/lib/dpkg/alternatives/iptables
I0912 21:29:48.199657 13904 oci.go:144] the created container "addons-207808" has a running status.
I0912 21:29:48.199687 13904 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa...
I0912 21:29:48.379535 13904 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0912 21:29:48.403622 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:29:48.421551 13904 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0912 21:29:48.421571 13904 kic_runner.go:114] Args: [docker exec --privileged addons-207808 chown docker:docker /home/docker/.ssh/authorized_keys]
I0912 21:29:48.541642 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:29:48.564279 13904 machine.go:93] provisionDockerMachine start ...
I0912 21:29:48.564351 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:48.584724 13904 main.go:141] libmachine: Using SSH client type: native
I0912 21:29:48.584930 13904 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x8375c0] 0x83a320 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0912 21:29:48.584945 13904 main.go:141] libmachine: About to run SSH command:
hostname
I0912 21:29:48.742155 13904 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-207808
I0912 21:29:48.742178 13904 ubuntu.go:169] provisioning hostname "addons-207808"
I0912 21:29:48.742223 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:48.759557 13904 main.go:141] libmachine: Using SSH client type: native
I0912 21:29:48.759761 13904 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x8375c0] 0x83a320 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0912 21:29:48.759780 13904 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-207808 && echo "addons-207808" | sudo tee /etc/hostname
I0912 21:29:48.884324 13904 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-207808
I0912 21:29:48.884389 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:48.900874 13904 main.go:141] libmachine: Using SSH client type: native
I0912 21:29:48.901036 13904 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x8375c0] 0x83a320 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0912 21:29:48.901052 13904 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-207808' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-207808/g' /etc/hosts;
else
echo '127.0.1.1 addons-207808' | sudo tee -a /etc/hosts;
fi
fi
I0912 21:29:49.014583 13904 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0912 21:29:49.014608 13904 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19616-5723/.minikube CaCertPath:/home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19616-5723/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19616-5723/.minikube}
I0912 21:29:49.014628 13904 ubuntu.go:177] setting up certificates
I0912 21:29:49.014641 13904 provision.go:84] configureAuth start
I0912 21:29:49.014702 13904 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-207808
I0912 21:29:49.030162 13904 provision.go:143] copyHostCerts
I0912 21:29:49.030236 13904 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19616-5723/.minikube/ca.pem (1078 bytes)
I0912 21:29:49.030362 13904 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19616-5723/.minikube/cert.pem (1123 bytes)
I0912 21:29:49.030427 13904 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19616-5723/.minikube/key.pem (1679 bytes)
I0912 21:29:49.030476 13904 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19616-5723/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca-key.pem org=jenkins.addons-207808 san=[127.0.0.1 192.168.49.2 addons-207808 localhost minikube]
I0912 21:29:49.145040 13904 provision.go:177] copyRemoteCerts
I0912 21:29:49.145092 13904 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0912 21:29:49.145142 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:49.162150 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:29:49.247117 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0912 21:29:49.268085 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0912 21:29:49.288579 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0912 21:29:49.309083 13904 provision.go:87] duration metric: took 294.430185ms to configureAuth
I0912 21:29:49.309106 13904 ubuntu.go:193] setting minikube options for container-runtime
I0912 21:29:49.309283 13904 config.go:182] Loaded profile config "addons-207808": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0912 21:29:49.309348 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:49.325771 13904 main.go:141] libmachine: Using SSH client type: native
I0912 21:29:49.325985 13904 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x8375c0] 0x83a320 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0912 21:29:49.325999 13904 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0912 21:29:49.442984 13904 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0912 21:29:49.443007 13904 ubuntu.go:71] root file system type: overlay
I0912 21:29:49.443140 13904 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0912 21:29:49.443201 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:49.459989 13904 main.go:141] libmachine: Using SSH client type: native
I0912 21:29:49.460158 13904 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x8375c0] 0x83a320 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0912 21:29:49.460215 13904 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0912 21:29:49.589434 13904 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0912 21:29:49.589503 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:49.605811 13904 main.go:141] libmachine: Using SSH client type: native
I0912 21:29:49.605974 13904 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x8375c0] 0x83a320 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0912 21:29:49.605991 13904 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0912 21:29:50.274178 13904 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-06 12:06:41.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-12 21:29:49.584594789 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0912 21:29:50.274204 13904 machine.go:96] duration metric: took 1.709904611s to provisionDockerMachine
I0912 21:29:50.274215 13904 client.go:171] duration metric: took 11.678649015s to LocalClient.Create
I0912 21:29:50.274233 13904 start.go:167] duration metric: took 11.67870124s to libmachine.API.Create "addons-207808"
I0912 21:29:50.274242 13904 start.go:293] postStartSetup for "addons-207808" (driver="docker")
I0912 21:29:50.274256 13904 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0912 21:29:50.274311 13904 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0912 21:29:50.274358 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:50.290564 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:29:50.375413 13904 ssh_runner.go:195] Run: cat /etc/os-release
I0912 21:29:50.378379 13904 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0912 21:29:50.378407 13904 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0912 21:29:50.378421 13904 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0912 21:29:50.378427 13904 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0912 21:29:50.378437 13904 filesync.go:126] Scanning /home/jenkins/minikube-integration/19616-5723/.minikube/addons for local assets ...
I0912 21:29:50.378495 13904 filesync.go:126] Scanning /home/jenkins/minikube-integration/19616-5723/.minikube/files for local assets ...
I0912 21:29:50.378518 13904 start.go:296] duration metric: took 104.268207ms for postStartSetup
I0912 21:29:50.378764 13904 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-207808
I0912 21:29:50.395475 13904 profile.go:143] Saving config to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/config.json ...
I0912 21:29:50.395733 13904 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0912 21:29:50.395783 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:50.411956 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:29:50.495377 13904 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0912 21:29:50.499285 13904 start.go:128] duration metric: took 11.905870064s to createHost
I0912 21:29:50.499309 13904 start.go:83] releasing machines lock for "addons-207808", held for 11.906003779s
I0912 21:29:50.499360 13904 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-207808
I0912 21:29:50.515299 13904 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0912 21:29:50.515300 13904 ssh_runner.go:195] Run: cat /version.json
I0912 21:29:50.515421 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:50.515432 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:29:50.532492 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:29:50.533009 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:29:50.614349 13904 ssh_runner.go:195] Run: systemctl --version
I0912 21:29:50.617972 13904 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0912 21:29:50.688351 13904 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0912 21:29:50.710654 13904 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0912 21:29:50.710720 13904 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0912 21:29:50.734788 13904 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
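The two find commands above patch the loopback CNI config in place and disable any bridge/podman configs by renaming them with a .mk_disabled suffix. A rough Go sketch of the rename step is below; it assumes a standalone program run as root and is not the code minikube actually executes (which does this over SSH with find/mv).

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// Rename any bridge/podman CNI config that is not already disabled so the
// container runtime ignores it.
func main() {
	paths, err := filepath.Glob("/etc/cni/net.d/*")
	if err != nil {
		panic(err)
	}
	for _, path := range paths {
		name := filepath.Base(path)
		if strings.HasSuffix(name, ".mk_disabled") {
			continue // already disabled
		}
		if strings.Contains(name, "bridge") || strings.Contains(name, "podman") {
			if err := os.Rename(path, path+".mk_disabled"); err != nil {
				fmt.Fprintln(os.Stderr, "rename failed:", err)
				continue
			}
			fmt.Println("disabled", path)
		}
	}
}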
I0912 21:29:50.734818 13904 start.go:495] detecting cgroup driver to use...
I0912 21:29:50.734851 13904 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0912 21:29:50.734977 13904 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0912 21:29:50.748693 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0912 21:29:50.756900 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0912 21:29:50.765069 13904 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0912 21:29:50.765115 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0912 21:29:50.773490 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0912 21:29:50.781907 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0912 21:29:50.790212 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0912 21:29:50.798514 13904 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0912 21:29:50.806773 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0912 21:29:50.815185 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0912 21:29:50.823612 13904 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0912 21:29:50.832105 13904 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0912 21:29:50.839580 13904 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0912 21:29:50.846569 13904 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0912 21:29:50.919523 13904 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0912 21:29:51.007514 13904 start.go:495] detecting cgroup driver to use...
I0912 21:29:51.007563 13904 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0912 21:29:51.007611 13904 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0912 21:29:51.018304 13904 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0912 21:29:51.018388 13904 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0912 21:29:51.029082 13904 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0912 21:29:51.044620 13904 ssh_runner.go:195] Run: which cri-dockerd
I0912 21:29:51.047768 13904 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0912 21:29:51.056517 13904 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0912 21:29:51.073334 13904 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0912 21:29:51.167663 13904 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0912 21:29:51.272366 13904 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0912 21:29:51.272516 13904 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0912 21:29:51.288980 13904 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0912 21:29:51.364323 13904 ssh_runner.go:195] Run: sudo systemctl restart docker
I0912 21:29:51.603677 13904 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0912 21:29:51.613966 13904 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0912 21:29:51.624184 13904 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0912 21:29:51.707746 13904 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0912 21:29:51.779576 13904 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0912 21:29:51.856088 13904 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0912 21:29:51.867916 13904 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0912 21:29:51.877609 13904 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0912 21:29:51.948974 13904 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0912 21:29:52.006021 13904 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0912 21:29:52.006104 13904 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0912 21:29:52.009331 13904 start.go:563] Will wait 60s for crictl version
I0912 21:29:52.009375 13904 ssh_runner.go:195] Run: which crictl
I0912 21:29:52.012359 13904 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0912 21:29:52.044115 13904 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.2.1
RuntimeApiVersion: v1
I0912 21:29:52.044179 13904 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0912 21:29:52.066830 13904 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0912 21:29:52.091460 13904 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.2.1 ...
I0912 21:29:52.091522 13904 cli_runner.go:164] Run: docker network inspect addons-207808 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0912 21:29:52.106808 13904 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0912 21:29:52.109983 13904 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
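This step pins host.minikube.internal to the gateway address by stripping any existing mapping and appending a fresh one. A small Go sketch of the same idea, using a hypothetical pinHostsEntry helper rather than minikube's actual code (which runs the grep -v / echo pipeline above over SSH):

package main

import (
	"fmt"
	"os"
	"strings"
)

// pinHostsEntry drops any line whose last field is the given host name and
// appends the desired mapping, roughly mirroring the shell pipeline above.
func pinHostsEntry(contents, ip, host string) string {
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(contents, "\n"), "\n") {
		fields := strings.Fields(line)
		if len(fields) > 0 && fields[len(fields)-1] == host {
			continue // existing mapping for this host; it will be replaced
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+host)
	return strings.Join(kept, "\n") + "\n"
}

func main() {
	data, err := os.ReadFile("/etc/hosts")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(pinHostsEntry(string(data), "192.168.49.1", "host.minikube.internal"))
}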
I0912 21:29:52.119381 13904 kubeadm.go:883] updating cluster {Name:addons-207808 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-207808 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0912 21:29:52.119489 13904 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0912 21:29:52.119535 13904 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0912 21:29:52.138148 13904 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0912 21:29:52.138169 13904 docker.go:615] Images already preloaded, skipping extraction
I0912 21:29:52.138225 13904 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0912 21:29:52.155730 13904 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0912 21:29:52.155769 13904 cache_images.go:84] Images are preloaded, skipping loading
I0912 21:29:52.155787 13904 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0912 21:29:52.155901 13904 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-207808 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-207808 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0912 21:29:52.155965 13904 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0912 21:29:52.198408 13904 cni.go:84] Creating CNI manager for ""
I0912 21:29:52.198432 13904 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0912 21:29:52.198442 13904 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0912 21:29:52.198463 13904 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-207808 NodeName:addons-207808 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0912 21:29:52.198618 13904 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "addons-207808"
  kubeletExtraArgs:
    node-ip: 192.168.49.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
I0912 21:29:52.198679 13904 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0912 21:29:52.206712 13904 binaries.go:44] Found k8s binaries, skipping transfer
I0912 21:29:52.206771 13904 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0912 21:29:52.214487 13904 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0912 21:29:52.230090 13904 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0912 21:29:52.245611 13904 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0912 21:29:52.261035 13904 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0912 21:29:52.264243 13904 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0912 21:29:52.273857 13904 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0912 21:29:52.345655 13904 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0912 21:29:52.357921 13904 certs.go:68] Setting up /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808 for IP: 192.168.49.2
I0912 21:29:52.357939 13904 certs.go:194] generating shared ca certs ...
I0912 21:29:52.357951 13904 certs.go:226] acquiring lock for ca certs: {Name:mk9f28859b4d312e5b4155554040e74e885f9892 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.358065 13904 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19616-5723/.minikube/ca.key
I0912 21:29:52.501309 13904 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19616-5723/.minikube/ca.crt ...
I0912 21:29:52.501337 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/ca.crt: {Name:mkc56f678fc592b8474ef4912f787bfdfc458c02 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.501524 13904 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19616-5723/.minikube/ca.key ...
I0912 21:29:52.501538 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/ca.key: {Name:mk60dc58220fcaaac43a0d3a605359feeb5f6cdf Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.501638 13904 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.key
I0912 21:29:52.670143 13904 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.crt ...
I0912 21:29:52.670172 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.crt: {Name:mk2698372e86c4c57b679a30ca4eb2ad1efc1cf2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.670324 13904 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.key ...
I0912 21:29:52.670335 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.key: {Name:mk345935fc27c646eeea4f4259dab61db43551ab Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.670404 13904 certs.go:256] generating profile certs ...
I0912 21:29:52.670455 13904 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/client.key
I0912 21:29:52.670468 13904 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/client.crt with IP's: []
I0912 21:29:52.871873 13904 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/client.crt ...
I0912 21:29:52.871900 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/client.crt: {Name:mkd3c9ab3428fe5906101cb663ee83957d0b60a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.872053 13904 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/client.key ...
I0912 21:29:52.872059 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/client.key: {Name:mk198c3732cf2d990e0944a2c7e8c85da4e92354 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.872123 13904 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.key.5b4b0a4c
I0912 21:29:52.872141 13904 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.crt.5b4b0a4c with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0912 21:29:52.969625 13904 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.crt.5b4b0a4c ...
I0912 21:29:52.969650 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.crt.5b4b0a4c: {Name:mkd4dbc4603877a34cb4e1c2a8ef90a8dbad8496 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.969789 13904 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.key.5b4b0a4c ...
I0912 21:29:52.969801 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.key.5b4b0a4c: {Name:mke39ebe334ed0c02224e9f7deb725dde52a4531 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:52.969865 13904 certs.go:381] copying /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.crt.5b4b0a4c -> /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.crt
I0912 21:29:52.969930 13904 certs.go:385] copying /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.key.5b4b0a4c -> /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.key
I0912 21:29:52.969975 13904 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.key
I0912 21:29:52.969991 13904 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.crt with IP's: []
I0912 21:29:53.257722 13904 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.crt ...
I0912 21:29:53.257753 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.crt: {Name:mkc6b51a9c8668213ba863cb3ab92fb9cefaabe9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:53.257907 13904 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.key ...
I0912 21:29:53.257916 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.key: {Name:mkbe34557ed5d6f016b9d98e11742b207903fcfa Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:29:53.258071 13904 certs.go:484] found cert: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca-key.pem (1679 bytes)
I0912 21:29:53.258108 13904 certs.go:484] found cert: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/ca.pem (1078 bytes)
I0912 21:29:53.258131 13904 certs.go:484] found cert: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/cert.pem (1123 bytes)
I0912 21:29:53.258156 13904 certs.go:484] found cert: /home/jenkins/minikube-integration/19616-5723/.minikube/certs/key.pem (1679 bytes)
I0912 21:29:53.258747 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0912 21:29:53.280799 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0912 21:29:53.301562 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0912 21:29:53.322358 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0912 21:29:53.343178 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0912 21:29:53.363766 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0912 21:29:53.384457 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0912 21:29:53.405466 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/profiles/addons-207808/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0912 21:29:53.425876 13904 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19616-5723/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0912 21:29:53.446060 13904 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
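The profile certs generated above are signed by the shared minikubeCA and carry the service, loopback, and node IPs as SANs. The sketch below produces a comparable certificate with the same IP SANs, but self-signed for brevity; it is a hypothetical standalone example, not minikube's crypto helpers.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "minikube"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(3 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// Same IP SANs as the apiserver profile cert in the log above.
		IPAddresses: []net.IP{
			net.ParseIP("10.96.0.1"), net.ParseIP("127.0.0.1"),
			net.ParseIP("10.0.0.1"), net.ParseIP("192.168.49.2"),
		},
	}
	// Self-signed here; the real cert uses the minikubeCA as parent/signer.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}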
I0912 21:29:53.461087 13904 ssh_runner.go:195] Run: openssl version
I0912 21:29:53.465974 13904 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0912 21:29:53.474743 13904 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0912 21:29:53.477855 13904 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 12 21:29 /usr/share/ca-certificates/minikubeCA.pem
I0912 21:29:53.477898 13904 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0912 21:29:53.484015 13904 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0912 21:29:53.492117 13904 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0912 21:29:53.494928 13904 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0912 21:29:53.494991 13904 kubeadm.go:392] StartCluster: {Name:addons-207808 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726156396-19616@sha256:66b06a42534e914a5c8ad765d7508a93a34031939ec9a6b3a818ef0a444ff889 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-207808 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0912 21:29:53.495093 13904 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0912 21:29:53.511274 13904 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0912 21:29:53.519103 13904 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0912 21:29:53.526797 13904 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0912 21:29:53.526844 13904 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0912 21:29:53.534352 13904 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0912 21:29:53.534369 13904 kubeadm.go:157] found existing configuration files:
I0912 21:29:53.534411 13904 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0912 21:29:53.542031 13904 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0912 21:29:53.542085 13904 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0912 21:29:53.549542 13904 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0912 21:29:53.557004 13904 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0912 21:29:53.557059 13904 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0912 21:29:53.564348 13904 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0912 21:29:53.571907 13904 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0912 21:29:53.571955 13904 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0912 21:29:53.579217 13904 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0912 21:29:53.586635 13904 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0912 21:29:53.586682 13904 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
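Each kubeconfig above is kept only if it already references https://control-plane.minikube.internal:8443; otherwise it is removed so kubeadm can regenerate it. A compact Go sketch of that check, assuming a standalone program with hard-coded paths rather than the SSH-based grep/rm sequence in the log:

package main

import (
	"fmt"
	"os"
	"strings"
)

// Keep a kubeconfig only if it already points at the expected control-plane
// endpoint; delete stale or missing ones so they can be regenerated.
func main() {
	const endpoint = "https://control-plane.minikube.internal:8443"
	files := []string{
		"/etc/kubernetes/admin.conf",
		"/etc/kubernetes/kubelet.conf",
		"/etc/kubernetes/controller-manager.conf",
		"/etc/kubernetes/scheduler.conf",
	}
	for _, f := range files {
		data, err := os.ReadFile(f)
		if err == nil && strings.Contains(string(data), endpoint) {
			continue // already targets the right endpoint, keep it
		}
		if err := os.Remove(f); err != nil && !os.IsNotExist(err) {
			fmt.Fprintln(os.Stderr, "remove failed:", err)
		}
	}
}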
I0912 21:29:53.593751 13904 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0912 21:29:53.627081 13904 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0912 21:29:53.627128 13904 kubeadm.go:310] [preflight] Running pre-flight checks
I0912 21:29:53.644324 13904 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0912 21:29:53.644399 13904 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1067-gcp
I0912 21:29:53.644464 13904 kubeadm.go:310] OS: Linux
I0912 21:29:53.644521 13904 kubeadm.go:310] CGROUPS_CPU: enabled
I0912 21:29:53.644560 13904 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0912 21:29:53.644604 13904 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0912 21:29:53.644642 13904 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0912 21:29:53.644707 13904 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0912 21:29:53.644780 13904 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0912 21:29:53.644854 13904 kubeadm.go:310] CGROUPS_PIDS: enabled
I0912 21:29:53.644906 13904 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0912 21:29:53.644946 13904 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0912 21:29:53.691442 13904 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0912 21:29:53.691539 13904 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0912 21:29:53.691638 13904 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0912 21:29:53.704053 13904 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0912 21:29:53.707645 13904 out.go:235] - Generating certificates and keys ...
I0912 21:29:53.707764 13904 kubeadm.go:310] [certs] Using existing ca certificate authority
I0912 21:29:53.707867 13904 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0912 21:29:53.845730 13904 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0912 21:29:53.968295 13904 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0912 21:29:54.108860 13904 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0912 21:29:54.281685 13904 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0912 21:29:54.377426 13904 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0912 21:29:54.377599 13904 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-207808 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0912 21:29:54.659094 13904 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0912 21:29:54.659249 13904 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-207808 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0912 21:29:54.815300 13904 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0912 21:29:55.195621 13904 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0912 21:29:55.313792 13904 kubeadm.go:310] [certs] Generating "sa" key and public key
I0912 21:29:55.313989 13904 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0912 21:29:55.494291 13904 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0912 21:29:55.689984 13904 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0912 21:29:55.767039 13904 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0912 21:29:55.953890 13904 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0912 21:29:56.108313 13904 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0912 21:29:56.108749 13904 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0912 21:29:56.111100 13904 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0912 21:29:56.113198 13904 out.go:235] - Booting up control plane ...
I0912 21:29:56.113342 13904 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0912 21:29:56.113520 13904 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0912 21:29:56.113665 13904 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0912 21:29:56.122675 13904 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0912 21:29:56.127746 13904 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0912 21:29:56.127807 13904 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0912 21:29:56.212312 13904 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0912 21:29:56.212411 13904 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0912 21:29:56.714192 13904 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.893561ms
I0912 21:29:56.714305 13904 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0912 21:30:01.215979 13904 kubeadm.go:310] [api-check] The API server is healthy after 4.501781101s
I0912 21:30:01.226470 13904 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0912 21:30:01.237092 13904 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0912 21:30:01.256086 13904 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0912 21:30:01.256326 13904 kubeadm.go:310] [mark-control-plane] Marking the node addons-207808 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0912 21:30:01.263531 13904 kubeadm.go:310] [bootstrap-token] Using token: suugsy.pqb0hml2cfmbk8du
I0912 21:30:01.265390 13904 out.go:235] - Configuring RBAC rules ...
I0912 21:30:01.265534 13904 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0912 21:30:01.268482 13904 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0912 21:30:01.275132 13904 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0912 21:30:01.277538 13904 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0912 21:30:01.279902 13904 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0912 21:30:01.282433 13904 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0912 21:30:01.621452 13904 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0912 21:30:02.058185 13904 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0912 21:30:02.621139 13904 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0912 21:30:02.621937 13904 kubeadm.go:310]
I0912 21:30:02.622029 13904 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0912 21:30:02.622040 13904 kubeadm.go:310]
I0912 21:30:02.622146 13904 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0912 21:30:02.622157 13904 kubeadm.go:310]
I0912 21:30:02.622198 13904 kubeadm.go:310] mkdir -p $HOME/.kube
I0912 21:30:02.622288 13904 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0912 21:30:02.622363 13904 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0912 21:30:02.622373 13904 kubeadm.go:310]
I0912 21:30:02.622467 13904 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0912 21:30:02.622487 13904 kubeadm.go:310]
I0912 21:30:02.622560 13904 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0912 21:30:02.622570 13904 kubeadm.go:310]
I0912 21:30:02.622647 13904 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0912 21:30:02.622773 13904 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0912 21:30:02.622876 13904 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0912 21:30:02.622889 13904 kubeadm.go:310]
I0912 21:30:02.623030 13904 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0912 21:30:02.623143 13904 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0912 21:30:02.623153 13904 kubeadm.go:310]
I0912 21:30:02.623293 13904 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token suugsy.pqb0hml2cfmbk8du \
I0912 21:30:02.623454 13904 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:995bc7e43594bb0c046233ae8059535de7d0e8faaa285ff35d9af3858a82acc4 \
I0912 21:30:02.623490 13904 kubeadm.go:310] --control-plane
I0912 21:30:02.623502 13904 kubeadm.go:310]
I0912 21:30:02.623630 13904 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0912 21:30:02.623639 13904 kubeadm.go:310]
I0912 21:30:02.623742 13904 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token suugsy.pqb0hml2cfmbk8du \
I0912 21:30:02.623878 13904 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:995bc7e43594bb0c046233ae8059535de7d0e8faaa285ff35d9af3858a82acc4
I0912 21:30:02.625887 13904 kubeadm.go:310] W0912 21:29:53.624636 1918 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0912 21:30:02.626205 13904 kubeadm.go:310] W0912 21:29:53.625270 1918 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0912 21:30:02.626451 13904 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1067-gcp\n", err: exit status 1
I0912 21:30:02.626595 13904 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0912 21:30:02.626624 13904 cni.go:84] Creating CNI manager for ""
I0912 21:30:02.626648 13904 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0912 21:30:02.628300 13904 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0912 21:30:02.629430 13904 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0912 21:30:02.638511 13904 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0912 21:30:02.655207 13904 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0912 21:30:02.655289 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:02.655336 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-207808 minikube.k8s.io/updated_at=2024_09_12T21_30_02_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=f6bc674a17941874d4e5b792b09c1791d30622b8 minikube.k8s.io/name=addons-207808 minikube.k8s.io/primary=true
I0912 21:30:02.737003 13904 ops.go:34] apiserver oom_adj: -16
I0912 21:30:02.737033 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:03.237125 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:03.737319 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:04.237387 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:04.737103 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:05.237237 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:05.737702 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:06.237275 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:06.737490 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:07.237069 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:07.738042 13904 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0912 21:30:07.806460 13904 kubeadm.go:1113] duration metric: took 5.151221309s to wait for elevateKubeSystemPrivileges
I0912 21:30:07.806494 13904 kubeadm.go:394] duration metric: took 14.311502028s to StartCluster
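The half-second retries of "kubectl get sa default" above are a plain poll-until-ready loop with an overall deadline. A generic Go sketch of that pattern follows; pollUntilReady is a hypothetical helper and the timeout is illustrative (the real harness runs the command over SSH inside the node).

package main

import (
	"fmt"
	"os/exec"
	"time"
)

// pollUntilReady retries cmd every interval until it succeeds or the
// overall timeout expires.
func pollUntilReady(timeout, interval time.Duration, cmd func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		if err := cmd(); err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %s", timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	err := pollUntilReady(60*time.Second, 500*time.Millisecond, func() error {
		return exec.Command("kubectl", "get", "sa", "default").Run()
	})
	fmt.Println("result:", err)
}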
I0912 21:30:07.806515 13904 settings.go:142] acquiring lock: {Name:mk2d37c2f531fa16878dd10abfcfc5daf090ef07 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:30:07.806623 13904 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19616-5723/kubeconfig
I0912 21:30:07.806954 13904 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19616-5723/kubeconfig: {Name:mk37c718bc544b1cff45c15afa951be50347f04b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0912 21:30:07.807200 13904 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0912 21:30:07.807209 13904 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0912 21:30:07.807267 13904 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0912 21:30:07.807379 13904 addons.go:69] Setting yakd=true in profile "addons-207808"
I0912 21:30:07.807406 13904 addons.go:234] Setting addon yakd=true in "addons-207808"
I0912 21:30:07.807408 13904 config.go:182] Loaded profile config "addons-207808": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0912 21:30:07.807437 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.807467 13904 addons.go:69] Setting inspektor-gadget=true in profile "addons-207808"
I0912 21:30:07.807493 13904 addons.go:234] Setting addon inspektor-gadget=true in "addons-207808"
I0912 21:30:07.807524 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.807692 13904 addons.go:69] Setting gcp-auth=true in profile "addons-207808"
I0912 21:30:07.807732 13904 mustload.go:65] Loading cluster: addons-207808
I0912 21:30:07.807918 13904 config.go:182] Loaded profile config "addons-207808": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0912 21:30:07.807938 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.807972 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.808091 13904 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-207808"
I0912 21:30:07.808155 13904 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-207808"
I0912 21:30:07.808149 13904 addons.go:69] Setting default-storageclass=true in profile "addons-207808"
I0912 21:30:07.808186 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.808192 13904 addons.go:69] Setting cloud-spanner=true in profile "addons-207808"
I0912 21:30:07.808199 13904 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-207808"
I0912 21:30:07.808217 13904 addons.go:234] Setting addon cloud-spanner=true in "addons-207808"
I0912 21:30:07.808270 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.808489 13904 addons.go:69] Setting helm-tiller=true in profile "addons-207808"
I0912 21:30:07.808516 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.808526 13904 addons.go:69] Setting ingress=true in profile "addons-207808"
I0912 21:30:07.808544 13904 addons.go:234] Setting addon ingress=true in "addons-207808"
I0912 21:30:07.808574 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.808709 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.808186 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.808975 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.809176 13904 addons.go:69] Setting ingress-dns=true in profile "addons-207808"
I0912 21:30:07.809206 13904 addons.go:234] Setting addon ingress-dns=true in "addons-207808"
I0912 21:30:07.809248 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.809250 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.809693 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.811129 13904 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-207808"
I0912 21:30:07.812528 13904 out.go:177] * Verifying Kubernetes components...
I0912 21:30:07.814051 13904 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0912 21:30:07.814167 13904 addons.go:69] Setting volumesnapshots=true in profile "addons-207808"
I0912 21:30:07.814206 13904 addons.go:234] Setting addon volumesnapshots=true in "addons-207808"
I0912 21:30:07.814245 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.814723 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.817195 13904 addons.go:69] Setting volcano=true in profile "addons-207808"
I0912 21:30:07.817380 13904 addons.go:234] Setting addon volcano=true in "addons-207808"
I0912 21:30:07.817432 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.817922 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.829619 13904 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-207808"
I0912 21:30:07.829795 13904 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-207808"
I0912 21:30:07.808517 13904 addons.go:234] Setting addon helm-tiller=true in "addons-207808"
I0912 21:30:07.829851 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.830506 13904 addons.go:69] Setting metrics-server=true in profile "addons-207808"
I0912 21:30:07.830532 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.830555 13904 addons.go:234] Setting addon metrics-server=true in "addons-207808"
I0912 21:30:07.830594 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.830617 13904 addons.go:69] Setting registry=true in profile "addons-207808"
I0912 21:30:07.830698 13904 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-207808"
I0912 21:30:07.830733 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.831277 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.831703 13904 addons.go:69] Setting storage-provisioner=true in profile "addons-207808"
I0912 21:30:07.831733 13904 addons.go:234] Setting addon storage-provisioner=true in "addons-207808"
I0912 21:30:07.831752 13904 addons.go:234] Setting addon registry=true in "addons-207808"
I0912 21:30:07.831762 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.831795 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.832268 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.832340 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.832880 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.851950 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.852360 13904 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0912 21:30:07.853095 13904 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0912 21:30:07.853802 13904 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0912 21:30:07.853828 13904 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0912 21:30:07.853933 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.855587 13904 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0912 21:30:07.855605 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0912 21:30:07.855653 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.868159 13904 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0912 21:30:07.868587 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.869883 13904 addons.go:234] Setting addon default-storageclass=true in "addons-207808"
I0912 21:30:07.869925 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.870783 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.879959 13904 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0912 21:30:07.879981 13904 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0912 21:30:07.880072 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.884159 13904 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0912 21:30:07.884323 13904 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.23
I0912 21:30:07.891436 13904 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0912 21:30:07.891689 13904 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0912 21:30:07.891702 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0912 21:30:07.891767 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.893205 13904 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0912 21:30:07.894838 13904 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0912 21:30:07.894947 13904 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0912 21:30:07.896649 13904 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0912 21:30:07.897942 13904 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0912 21:30:07.898308 13904 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0912 21:30:07.898325 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0912 21:30:07.898386 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.900735 13904 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0912 21:30:07.902421 13904 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0912 21:30:07.903913 13904 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0912 21:30:07.905154 13904 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0912 21:30:07.906381 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0912 21:30:07.906397 13904 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0912 21:30:07.906457 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.906667 13904 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0912 21:30:07.908223 13904 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0912 21:30:07.908247 13904 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0912 21:30:07.908305 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.915100 13904 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0912 21:30:07.917854 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.920461 13904 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0912 21:30:07.920666 13904 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0912 21:30:07.922545 13904 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0912 21:30:07.924911 13904 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0912 21:30:07.925166 13904 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0912 21:30:07.925185 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0912 21:30:07.925242 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.925788 13904 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0912 21:30:07.925805 13904 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0912 21:30:07.925855 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.927237 13904 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0912 21:30:07.927257 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0912 21:30:07.927306 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.928536 13904 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
I0912 21:30:07.929634 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.929635 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.929895 13904 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0912 21:30:07.929986 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0912 21:30:07.930086 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.935643 13904 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0912 21:30:07.936974 13904 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0912 21:30:07.936994 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0912 21:30:07.937053 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.943752 13904 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-207808"
I0912 21:30:07.943794 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:07.944227 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:07.946885 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.961235 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.972598 13904 out.go:177] - Using image docker.io/registry:2.8.3
I0912 21:30:07.972648 13904 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0912 21:30:07.974476 13904 out.go:177] - Using image docker.io/busybox:stable
I0912 21:30:07.974576 13904 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0912 21:30:07.975772 13904 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0912 21:30:07.975812 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0912 21:30:07.975869 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.976263 13904 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0912 21:30:07.976277 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0912 21:30:07.976362 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:07.985521 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.988313 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.989934 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:07.993205 13904 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0912 21:30:07.993222 13904 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0912 21:30:07.993265 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:08.001114 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.002108 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.007068 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.007215 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.007827 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.009392 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.016546 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:08.356145 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0912 21:30:08.357522 13904 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0912 21:30:08.357546 13904 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0912 21:30:08.432767 13904 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0912 21:30:08.432812 13904 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0912 21:30:08.436296 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0912 21:30:08.442005 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0912 21:30:08.442083 13904 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0912 21:30:08.536005 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0912 21:30:08.551124 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0912 21:30:08.555937 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0912 21:30:08.632488 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0912 21:30:08.742765 13904 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0912 21:30:08.742854 13904 ssh_runner.go:362] scp helm-tiller/helm-tiller-rbac.yaml --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0912 21:30:08.744572 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0912 21:30:08.744638 13904 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0912 21:30:08.832315 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0912 21:30:08.834470 13904 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0912 21:30:08.834499 13904 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0912 21:30:08.835582 13904 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0912 21:30:08.835659 13904 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0912 21:30:08.838599 13904 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0912 21:30:08.838616 13904 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0912 21:30:08.852405 13904 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0912 21:30:08.852486 13904 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0912 21:30:08.853062 13904 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0912 21:30:08.853124 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0912 21:30:09.036409 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0912 21:30:09.042999 13904 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0912 21:30:09.043087 13904 ssh_runner.go:362] scp helm-tiller/helm-tiller-svc.yaml --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0912 21:30:09.052930 13904 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0912 21:30:09.053018 13904 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0912 21:30:09.231669 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0912 21:30:09.231701 13904 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0912 21:30:09.236060 13904 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0912 21:30:09.236087 13904 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0912 21:30:09.242132 13904 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0912 21:30:09.242214 13904 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0912 21:30:09.345938 13904 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0912 21:30:09.346049 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0912 21:30:09.352811 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0912 21:30:09.434642 13904 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0912 21:30:09.434682 13904 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0912 21:30:09.447579 13904 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0912 21:30:09.447660 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0912 21:30:09.640196 13904 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0912 21:30:09.640226 13904 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0912 21:30:09.650073 13904 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0912 21:30:09.650169 13904 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0912 21:30:09.733822 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0912 21:30:09.733916 13904 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0912 21:30:09.751079 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0912 21:30:09.934582 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0912 21:30:09.934615 13904 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0912 21:30:10.041680 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0912 21:30:10.142656 13904 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0912 21:30:10.142746 13904 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0912 21:30:10.335641 13904 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0912 21:30:10.335738 13904 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0912 21:30:10.538666 13904 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0912 21:30:10.538749 13904 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0912 21:30:10.732748 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0912 21:30:10.733037 13904 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0912 21:30:10.733157 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0912 21:30:10.737671 13904 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0912 21:30:10.737744 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0912 21:30:11.047414 13904 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0912 21:30:11.047452 13904 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0912 21:30:11.132893 13904 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0912 21:30:11.132936 13904 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0912 21:30:11.140665 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0912 21:30:11.152927 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (2.796737958s)
I0912 21:30:11.153096 13904 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.720264145s)
I0912 21:30:11.153139 13904 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0912 21:30:11.154425 13904 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (2.721624502s)
I0912 21:30:11.155560 13904 node_ready.go:35] waiting up to 6m0s for node "addons-207808" to be "Ready" ...
I0912 21:30:11.154540 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.718153623s)
I0912 21:30:11.238150 13904 node_ready.go:49] node "addons-207808" has status "Ready":"True"
I0912 21:30:11.238235 13904 node_ready.go:38] duration metric: took 82.56405ms for node "addons-207808" to be "Ready" ...
I0912 21:30:11.238261 13904 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0912 21:30:11.259345 13904 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-7whgg" in "kube-system" namespace to be "Ready" ...
I0912 21:30:11.552386 13904 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0912 21:30:11.552475 13904 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0912 21:30:11.733035 13904 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-207808" context rescaled to 1 replicas
I0912 21:30:11.848740 13904 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0912 21:30:11.848767 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0912 21:30:12.344774 13904 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0912 21:30:12.344862 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0912 21:30:12.538814 13904 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0912 21:30:12.538885 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0912 21:30:12.738265 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0912 21:30:12.844695 13904 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0912 21:30:12.844800 13904 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0912 21:30:13.337022 13904 pod_ready.go:93] pod "coredns-7c65d6cfc9-7whgg" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:13.337115 13904 pod_ready.go:82] duration metric: took 2.07769498s for pod "coredns-7c65d6cfc9-7whgg" in "kube-system" namespace to be "Ready" ...
I0912 21:30:13.337140 13904 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-nqb66" in "kube-system" namespace to be "Ready" ...
I0912 21:30:13.451623 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0912 21:30:14.344145 13904 pod_ready.go:93] pod "coredns-7c65d6cfc9-nqb66" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:14.344408 13904 pod_ready.go:82] duration metric: took 1.007247965s for pod "coredns-7c65d6cfc9-nqb66" in "kube-system" namespace to be "Ready" ...
I0912 21:30:14.344443 13904 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:14.938318 13904 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0912 21:30:14.938490 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:14.965422 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:15.545132 13904 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0912 21:30:15.748464 13904 addons.go:234] Setting addon gcp-auth=true in "addons-207808"
I0912 21:30:15.748538 13904 host.go:66] Checking if "addons-207808" exists ...
I0912 21:30:15.749071 13904 cli_runner.go:164] Run: docker container inspect addons-207808 --format={{.State.Status}}
I0912 21:30:15.767098 13904 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0912 21:30:15.767154 13904 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-207808
I0912 21:30:15.782591 13904 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19616-5723/.minikube/machines/addons-207808/id_rsa Username:docker}
I0912 21:30:16.352296 13904 pod_ready.go:93] pod "etcd-addons-207808" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:16.352320 13904 pod_ready.go:82] duration metric: took 2.007858822s for pod "etcd-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:16.352333 13904 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:16.939200 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (8.403098741s)
I0912 21:30:16.939239 13904 addons.go:475] Verifying addon ingress=true in "addons-207808"
I0912 21:30:16.940721 13904 out.go:177] * Verifying ingress addon...
I0912 21:30:16.943465 13904 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0912 21:30:16.950514 13904 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0912 21:30:16.950543 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:17.439238 13904 pod_ready.go:93] pod "kube-apiserver-addons-207808" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:17.439324 13904 pod_ready.go:82] duration metric: took 1.08698066s for pod "kube-apiserver-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.439351 13904 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.451657 13904 pod_ready.go:93] pod "kube-controller-manager-addons-207808" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:17.451712 13904 pod_ready.go:82] duration metric: took 12.312718ms for pod "kube-controller-manager-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.451734 13904 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-2xmvv" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.453840 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:17.457782 13904 pod_ready.go:93] pod "kube-proxy-2xmvv" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:17.457801 13904 pod_ready.go:82] duration metric: took 6.053061ms for pod "kube-proxy-2xmvv" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.457821 13904 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.545776 13904 pod_ready.go:93] pod "kube-scheduler-addons-207808" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:17.545801 13904 pod_ready.go:82] duration metric: took 87.972704ms for pod "kube-scheduler-addons-207808" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.545824 13904 pod_ready.go:79] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace to be "Ready" ...
I0912 21:30:17.949464 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:18.447795 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:18.948000 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:19.448900 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:19.644304 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:19.949266 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:20.149915 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (11.598727194s)
I0912 21:30:20.150005 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (11.594042737s)
I0912 21:30:20.150072 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (11.517497989s)
I0912 21:30:20.150347 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (11.317948181s)
I0912 21:30:20.150462 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (11.114026793s)
I0912 21:30:20.150695 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (10.797802569s)
I0912 21:30:20.150772 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (10.399665414s)
I0912 21:30:20.150817 13904 addons.go:475] Verifying addon registry=true in "addons-207808"
I0912 21:30:20.151243 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (10.109515385s)
I0912 21:30:20.151527 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.010765773s)
W0912 21:30:20.151564 13904 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0912 21:30:20.151584 13904 retry.go:31] will retry after 155.618092ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0912 21:30:20.151695 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (7.413394096s)
I0912 21:30:20.151764 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.418302916s)
I0912 21:30:20.151788 13904 addons.go:475] Verifying addon metrics-server=true in "addons-207808"
I0912 21:30:20.153302 13904 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-207808 service yakd-dashboard -n yakd-dashboard
I0912 21:30:20.153383 13904 out.go:177] * Verifying registry addon...
I0912 21:30:20.156421 13904 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0912 21:30:20.235688 13904 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0912 21:30:20.235728 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
W0912 21:30:20.240686 13904 out.go:270] ! Enabling 'default-storageclass' returned an error: running callbacks: [Error making standard the default storage class: Error while marking storage class local-path as non-default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
I0912 21:30:20.308108 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0912 21:30:20.448654 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:20.659877 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:20.947920 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:21.160682 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:21.452180 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:21.654933 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (8.203187167s)
I0912 21:30:21.655059 13904 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-207808"
I0912 21:30:21.655104 13904 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (5.887972523s)
I0912 21:30:21.656354 13904 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0912 21:30:21.656358 13904 out.go:177] * Verifying csi-hostpath-driver addon...
I0912 21:30:21.660598 13904 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0912 21:30:21.661248 13904 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0912 21:30:21.661967 13904 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0912 21:30:21.661985 13904 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0912 21:30:21.663689 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:21.741800 13904 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0912 21:30:21.741891 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:21.753017 13904 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0912 21:30:21.753047 13904 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0912 21:30:21.840422 13904 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0912 21:30:21.840447 13904 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0912 21:30:21.862292 13904 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0912 21:30:21.947774 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:22.054161 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:22.160775 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:22.233496 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:22.449255 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:22.733609 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:22.735827 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:22.741754 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.433590338s)
I0912 21:30:22.947946 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:23.160545 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:23.161961 13904 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.299631456s)
I0912 21:30:23.163570 13904 addons.go:475] Verifying addon gcp-auth=true in "addons-207808"
I0912 21:30:23.165096 13904 out.go:177] * Verifying gcp-auth addon...
I0912 21:30:23.167335 13904 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0912 21:30:23.260081 13904 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0912 21:30:23.261458 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:23.447131 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:23.660613 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:23.665744 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:23.946815 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:24.160059 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:24.164984 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:24.447754 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:24.551924 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:24.660085 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:24.664952 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:24.948454 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:25.160188 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:25.165199 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:25.447595 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:25.660038 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:25.665049 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:25.947840 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:26.159978 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:26.261664 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:26.448235 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:26.660356 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:26.665050 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:26.947800 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:27.051442 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:27.160318 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:27.165177 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:27.448086 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:27.659735 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:27.664619 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:27.947381 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:28.159346 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:28.164714 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:28.447018 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:28.659741 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:28.664185 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:28.947758 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:29.051644 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:29.160155 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:29.164774 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:29.447454 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:29.659647 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:29.665292 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:29.947716 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:30.160127 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:30.166468 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:30.447841 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:30.660915 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:30.665244 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:30.948260 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:31.160507 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:31.165986 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:31.448229 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:31.551785 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:31.659644 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:31.666011 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:31.948914 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:32.160511 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:32.165258 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:32.447119 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:32.660275 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:32.664890 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:32.947478 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:33.160218 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:33.166234 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:33.447375 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:33.659899 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:33.664821 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:33.947232 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:34.050941 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:34.159975 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:34.164605 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:34.447165 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:34.659870 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:34.664944 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:34.948013 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:35.160047 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:35.164972 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:35.448373 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:35.659879 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:35.665766 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:35.948430 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:36.052145 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:36.160052 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:36.165199 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:36.449655 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:36.660268 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:36.664951 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:36.948535 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:37.160535 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:37.165956 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:37.447626 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:37.659739 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:37.665888 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:37.948186 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:38.160115 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:38.165217 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:38.448609 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:38.552072 13904 pod_ready.go:103] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"False"
I0912 21:30:38.660386 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:38.665659 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:38.947114 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:39.159376 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:39.164933 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:39.447487 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:39.660199 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:39.665022 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:39.947935 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:40.159915 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:40.164757 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:40.446645 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:40.551089 13904 pod_ready.go:93] pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace has status "Ready":"True"
I0912 21:30:40.551116 13904 pod_ready.go:82] duration metric: took 23.005278837s for pod "nvidia-device-plugin-daemonset-mc6cs" in "kube-system" namespace to be "Ready" ...
I0912 21:30:40.551127 13904 pod_ready.go:39] duration metric: took 29.312842s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0912 21:30:40.551150 13904 api_server.go:52] waiting for apiserver process to appear ...
I0912 21:30:40.551215 13904 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0912 21:30:40.564545 13904 api_server.go:72] duration metric: took 32.757307666s to wait for apiserver process to appear ...
I0912 21:30:40.564569 13904 api_server.go:88] waiting for apiserver healthz status ...
I0912 21:30:40.564585 13904 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0912 21:30:40.568868 13904 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0912 21:30:40.569599 13904 api_server.go:141] control plane version: v1.31.1
I0912 21:30:40.569620 13904 api_server.go:131] duration metric: took 5.045542ms to wait for apiserver health ...
I0912 21:30:40.569627 13904 system_pods.go:43] waiting for kube-system pods to appear ...
I0912 21:30:40.576038 13904 system_pods.go:59] 18 kube-system pods found
I0912 21:30:40.576067 13904 system_pods.go:61] "coredns-7c65d6cfc9-nqb66" [44c5fa36-5441-48c7-a7bd-8e2d821c77c0] Running
I0912 21:30:40.576075 13904 system_pods.go:61] "csi-hostpath-attacher-0" [f6815d37-6a4d-44f0-a067-d649e3a441a7] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0912 21:30:40.576080 13904 system_pods.go:61] "csi-hostpath-resizer-0" [00aca749-1720-46ee-8e3d-37d6ff6aabfd] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0912 21:30:40.576088 13904 system_pods.go:61] "csi-hostpathplugin-5dpdr" [79114267-b5df-4335-a1bc-43b76311472c] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0912 21:30:40.576093 13904 system_pods.go:61] "etcd-addons-207808" [55583cad-3793-4a65-b549-341872f500f2] Running
I0912 21:30:40.576097 13904 system_pods.go:61] "kube-apiserver-addons-207808" [62f90147-f7b6-4a55-98e3-e6c6c657bb9f] Running
I0912 21:30:40.576101 13904 system_pods.go:61] "kube-controller-manager-addons-207808" [770fb5d8-a95f-4c79-8890-b4b3967d8ba0] Running
I0912 21:30:40.576104 13904 system_pods.go:61] "kube-ingress-dns-minikube" [13cca3f9-c8f2-4cc9-8605-5d8961e06c0c] Running
I0912 21:30:40.576107 13904 system_pods.go:61] "kube-proxy-2xmvv" [82d22286-ca1b-4a37-88ea-a0dc0c1fa9fd] Running
I0912 21:30:40.576111 13904 system_pods.go:61] "kube-scheduler-addons-207808" [7e3b3ace-ac55-4804-b7c9-819dc64a505f] Running
I0912 21:30:40.576115 13904 system_pods.go:61] "metrics-server-84c5f94fbc-qp9pj" [467286ab-a1a8-4e01-aef7-f92c567162ba] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0912 21:30:40.576121 13904 system_pods.go:61] "nvidia-device-plugin-daemonset-mc6cs" [1c6b255b-a9a3-49d2-9fac-3dee50123ecc] Running
I0912 21:30:40.576127 13904 system_pods.go:61] "registry-66c9cd494c-mdbsb" [6646693e-e468-4f8c-a209-9f028e31da67] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0912 21:30:40.576135 13904 system_pods.go:61] "registry-proxy-fjxbz" [6340cd55-7e16-4315-8b01-5e879a2b0d76] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0912 21:30:40.576142 13904 system_pods.go:61] "snapshot-controller-56fcc65765-lc6mh" [75d1061f-fc5b-42bc-a091-c587ce534a9a] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0912 21:30:40.576149 13904 system_pods.go:61] "snapshot-controller-56fcc65765-tczjb" [f4c0d99d-69a7-411d-bd82-833a4a9dc9a4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0912 21:30:40.576155 13904 system_pods.go:61] "storage-provisioner" [62c01ea5-9b66-45e4-9e9f-1ab26c0298a2] Running
I0912 21:30:40.576161 13904 system_pods.go:61] "tiller-deploy-b48cc5f79-lnb7p" [7df8afba-a05c-403e-a96c-3556b198e183] Pending / Ready:ContainersNotReady (containers with unready status: [tiller]) / ContainersReady:ContainersNotReady (containers with unready status: [tiller])
I0912 21:30:40.576168 13904 system_pods.go:74] duration metric: took 6.536762ms to wait for pod list to return data ...
I0912 21:30:40.576178 13904 default_sa.go:34] waiting for default service account to be created ...
I0912 21:30:40.577942 13904 default_sa.go:45] found service account: "default"
I0912 21:30:40.577959 13904 default_sa.go:55] duration metric: took 1.774448ms for default service account to be created ...
I0912 21:30:40.577967 13904 system_pods.go:116] waiting for k8s-apps to be running ...
I0912 21:30:40.584402 13904 system_pods.go:86] 18 kube-system pods found
I0912 21:30:40.584426 13904 system_pods.go:89] "coredns-7c65d6cfc9-nqb66" [44c5fa36-5441-48c7-a7bd-8e2d821c77c0] Running
I0912 21:30:40.584434 13904 system_pods.go:89] "csi-hostpath-attacher-0" [f6815d37-6a4d-44f0-a067-d649e3a441a7] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0912 21:30:40.584442 13904 system_pods.go:89] "csi-hostpath-resizer-0" [00aca749-1720-46ee-8e3d-37d6ff6aabfd] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0912 21:30:40.584451 13904 system_pods.go:89] "csi-hostpathplugin-5dpdr" [79114267-b5df-4335-a1bc-43b76311472c] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0912 21:30:40.584455 13904 system_pods.go:89] "etcd-addons-207808" [55583cad-3793-4a65-b549-341872f500f2] Running
I0912 21:30:40.584461 13904 system_pods.go:89] "kube-apiserver-addons-207808" [62f90147-f7b6-4a55-98e3-e6c6c657bb9f] Running
I0912 21:30:40.584466 13904 system_pods.go:89] "kube-controller-manager-addons-207808" [770fb5d8-a95f-4c79-8890-b4b3967d8ba0] Running
I0912 21:30:40.584471 13904 system_pods.go:89] "kube-ingress-dns-minikube" [13cca3f9-c8f2-4cc9-8605-5d8961e06c0c] Running
I0912 21:30:40.584474 13904 system_pods.go:89] "kube-proxy-2xmvv" [82d22286-ca1b-4a37-88ea-a0dc0c1fa9fd] Running
I0912 21:30:40.584478 13904 system_pods.go:89] "kube-scheduler-addons-207808" [7e3b3ace-ac55-4804-b7c9-819dc64a505f] Running
I0912 21:30:40.584485 13904 system_pods.go:89] "metrics-server-84c5f94fbc-qp9pj" [467286ab-a1a8-4e01-aef7-f92c567162ba] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0912 21:30:40.584489 13904 system_pods.go:89] "nvidia-device-plugin-daemonset-mc6cs" [1c6b255b-a9a3-49d2-9fac-3dee50123ecc] Running
I0912 21:30:40.584497 13904 system_pods.go:89] "registry-66c9cd494c-mdbsb" [6646693e-e468-4f8c-a209-9f028e31da67] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0912 21:30:40.584504 13904 system_pods.go:89] "registry-proxy-fjxbz" [6340cd55-7e16-4315-8b01-5e879a2b0d76] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0912 21:30:40.584512 13904 system_pods.go:89] "snapshot-controller-56fcc65765-lc6mh" [75d1061f-fc5b-42bc-a091-c587ce534a9a] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0912 21:30:40.584518 13904 system_pods.go:89] "snapshot-controller-56fcc65765-tczjb" [f4c0d99d-69a7-411d-bd82-833a4a9dc9a4] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0912 21:30:40.584524 13904 system_pods.go:89] "storage-provisioner" [62c01ea5-9b66-45e4-9e9f-1ab26c0298a2] Running
I0912 21:30:40.584531 13904 system_pods.go:89] "tiller-deploy-b48cc5f79-lnb7p" [7df8afba-a05c-403e-a96c-3556b198e183] Pending / Ready:ContainersNotReady (containers with unready status: [tiller]) / ContainersReady:ContainersNotReady (containers with unready status: [tiller])
I0912 21:30:40.584540 13904 system_pods.go:126] duration metric: took 6.568081ms to wait for k8s-apps to be running ...
I0912 21:30:40.584548 13904 system_svc.go:44] waiting for kubelet service to be running ....
I0912 21:30:40.584587 13904 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0912 21:30:40.595535 13904 system_svc.go:56] duration metric: took 10.97749ms WaitForService to wait for kubelet
I0912 21:30:40.595564 13904 kubeadm.go:582] duration metric: took 32.788327796s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0912 21:30:40.595586 13904 node_conditions.go:102] verifying NodePressure condition ...
I0912 21:30:40.598377 13904 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0912 21:30:40.598400 13904 node_conditions.go:123] node cpu capacity is 8
I0912 21:30:40.598412 13904 node_conditions.go:105] duration metric: took 2.821948ms to run NodePressure ...
I0912 21:30:40.598423 13904 start.go:241] waiting for startup goroutines ...
I0912 21:30:40.659881 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:40.666903 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:40.947605 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:41.160323 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:41.165222 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:41.447099 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:41.659469 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:41.665630 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:41.947973 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:42.160688 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:42.165715 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:42.447275 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:42.659736 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:42.665903 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:42.947755 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:43.161041 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:43.164986 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:43.447649 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:43.659984 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:43.664792 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:43.947048 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:44.160589 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:44.165201 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:44.447733 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:44.659815 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:44.664978 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:44.947222 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:45.160020 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:45.164822 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:45.447503 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:45.659636 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:45.665385 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:45.947634 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:46.159848 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:46.165311 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:46.447981 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:46.659658 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:46.665225 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:46.948001 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:47.160123 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:47.164745 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:47.448312 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:47.659830 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:47.664969 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:47.948241 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:48.159374 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:48.165844 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:48.447122 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:48.659854 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:48.665473 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:48.947634 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:49.159931 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:49.164920 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:49.448060 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:49.660720 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:49.666057 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:49.947771 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:50.228658 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:50.229173 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:50.448399 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:50.660099 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:50.664684 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:50.947600 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:51.160337 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0912 21:30:51.165719 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:51.447592 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:51.659863 13904 kapi.go:107] duration metric: took 31.503437258s to wait for kubernetes.io/minikube-addons=registry ...
I0912 21:30:51.664880 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:51.947586 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:52.165571 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:52.448265 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:52.665878 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:52.947298 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:53.165119 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:53.447785 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:53.666609 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:53.947629 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:54.166123 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:54.447977 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:54.666094 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:54.947244 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:55.165021 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:55.448257 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:55.664711 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:55.947476 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:56.166526 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:56.448650 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:56.666252 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:56.948523 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:57.166291 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:57.447780 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:57.666084 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:57.948401 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:58.165936 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:58.447558 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:58.665968 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:58.953178 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:59.165310 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:59.448175 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:30:59.665585 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:30:59.947281 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:00.165965 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:00.450768 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:00.666303 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:00.947818 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:01.166482 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:01.447446 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:01.665844 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:01.947302 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:02.165094 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:02.447921 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:02.666262 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:02.948207 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:03.166246 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:03.448027 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:03.666604 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:03.947791 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:04.166495 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:04.447472 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:04.665618 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:04.947708 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:05.166284 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:05.447016 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:05.664924 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:05.947625 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:06.165914 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:06.447756 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:06.666707 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:06.948213 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:07.166481 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:07.447660 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:07.666042 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:07.948893 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:08.166206 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:08.448068 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:08.666661 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:08.947701 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:09.165825 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:09.447918 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:09.666559 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:09.947213 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:10.165677 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:10.447390 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:10.666303 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:10.947467 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:11.166447 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:11.447651 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:11.666650 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:11.948675 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:12.166424 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:12.447357 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:12.665383 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:12.948079 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:13.166897 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:13.448474 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:13.665744 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:13.947985 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:14.165753 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:14.448718 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:14.665611 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:14.948568 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:15.165693 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:15.447695 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:15.665813 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:15.947838 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:16.166934 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:16.447387 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:16.665915 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:16.947940 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:17.166407 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:17.447963 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:17.665187 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0912 21:31:17.947195 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:18.164674 13904 kapi.go:107] duration metric: took 56.503424612s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0912 21:31:18.447140 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:18.947219 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:19.447617 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:19.946901 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:20.448110 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:20.947280 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:21.448340 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:21.948960 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:22.447861 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:22.948709 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:23.448294 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:23.947640 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:24.447243 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:24.947949 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:25.583873 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:25.948214 13904 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0912 21:31:26.447710 13904 kapi.go:107] duration metric: took 1m9.504245832s to wait for app.kubernetes.io/name=ingress-nginx ...
I0912 21:31:46.173201 13904 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0912 21:31:46.173232 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:46.670456 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:47.170522 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:47.670101 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:48.170271 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:48.670604 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:49.170459 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:49.670439 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:50.170226 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:50.670944 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:51.170943 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:51.670810 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:52.171348 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:52.669875 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:53.171954 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:53.670753 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:54.170226 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:54.671055 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:55.170472 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:55.670659 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:56.170435 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:56.670749 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:57.170329 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:57.669848 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:58.171511 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:58.670025 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:59.170491 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:31:59.671119 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:00.170627 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:00.670721 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:01.170511 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:01.670667 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:02.170913 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:02.671054 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:03.173204 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:03.670852 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:04.170848 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:04.670920 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:05.170756 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:05.670898 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:06.171041 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:06.670185 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:07.170559 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:07.670465 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:08.170073 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:08.670545 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:09.170700 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:09.670647 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:10.170715 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:10.670581 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:11.170533 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:11.670708 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:12.170717 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:12.670471 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:13.169967 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:13.671087 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:14.171012 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:14.670632 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:15.170655 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:15.670651 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:16.170159 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:16.670410 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:17.170242 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:17.670909 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:18.171075 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:18.670699 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:19.170445 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:19.670364 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:20.169838 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:20.670889 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:21.171015 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:21.671209 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:22.170780 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:22.670909 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:23.170400 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:23.670358 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:24.169929 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:24.670662 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:25.170685 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:25.670298 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:26.169796 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:26.671171 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:27.170859 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:27.670719 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:28.170438 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:28.670156 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:29.170262 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:29.671181 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:30.170705 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:30.670792 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:31.170924 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:31.671207 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:32.170842 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:32.670890 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:33.170733 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:33.671013 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:34.170473 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:34.670252 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:35.171137 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:35.670943 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:36.170487 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:36.670605 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:37.170412 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:37.670929 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:38.170486 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:38.670138 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:39.170514 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:39.670690 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:40.170114 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:40.670954 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:41.171321 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:41.670253 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:42.170903 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:42.670885 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:43.170881 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:43.670654 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:44.170210 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:44.670572 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:45.170045 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:45.671048 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:46.170763 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:46.670759 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:47.170711 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:47.671063 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:48.170722 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:48.670606 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:49.170502 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:49.670499 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:50.170308 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:50.670315 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:51.170160 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:51.670372 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:52.170447 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:52.670234 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:53.170924 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:53.671151 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:54.170599 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:54.670269 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:55.170452 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:55.670831 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:56.170438 13904 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0912 21:32:56.670610 13904 kapi.go:107] duration metric: took 2m33.503269793s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0912 21:32:56.672262 13904 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-207808 cluster.
I0912 21:32:56.673512 13904 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0912 21:32:56.674932 13904 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0912 21:32:56.676163 13904 out.go:177] * Enabled addons: ingress-dns, cloud-spanner, volcano, nvidia-device-plugin, storage-provisioner, helm-tiller, inspektor-gadget, metrics-server, yakd, storage-provisioner-rancher, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0912 21:32:56.677537 13904 addons.go:510] duration metric: took 2m48.870266282s for enable addons: enabled=[ingress-dns cloud-spanner volcano nvidia-device-plugin storage-provisioner helm-tiller inspektor-gadget metrics-server yakd storage-provisioner-rancher volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0912 21:32:56.677586 13904 start.go:246] waiting for cluster config update ...
I0912 21:32:56.677613 13904 start.go:255] writing updated cluster config ...
I0912 21:32:56.677871 13904 ssh_runner.go:195] Run: rm -f paused
I0912 21:32:56.725486 13904 start.go:600] kubectl: 1.31.0, cluster: 1.31.1 (minor skew: 0)
I0912 21:32:56.727190 13904 out.go:177] * Done! kubectl is now configured to use "addons-207808" cluster and "default" namespace by default
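The gcp-auth notes above spell out the addon's behaviour: the webhook mounts GCP credentials into every newly created pod unless the pod carries a label whose key is gcp-auth-skip-secret, and pods that already exist only pick up credentials after being recreated or after rerunning the addon with --refresh. A minimal sketch of opting a pod out, assuming a label value of "true" and the hypothetical pod name no-creds (neither is dictated by the messages above):
kubectl --context addons-207808 run no-creds --image=nginx:alpine --labels=gcp-auth-skip-secret=true
kubectl --context addons-207808 get pod no-creds -o jsonpath='{.spec.volumes[*].name}'
# if the label is honoured, the gcp-creds host-path volume seen in the kubelet log further down should not appear in this list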
==> Docker <==
Sep 12 21:42:23 addons-207808 dockerd[1336]: time="2024-09-12T21:42:23.151871292Z" level=info msg="ignoring event" container=a3638bd0a4280dfe885223d87d8e50f9f34928cba99225ae06528c31797152aa module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:23 addons-207808 dockerd[1336]: time="2024-09-12T21:42:23.343464854Z" level=info msg="ignoring event" container=60c1923675e82074f18d0f835a978ad7f4b377abb04e8f796bffc47becf90dba module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:23 addons-207808 dockerd[1336]: time="2024-09-12T21:42:23.364492132Z" level=info msg="ignoring event" container=2054bb549cfe8fd31558100a32c3f65bb88ea53eecb94f25fc2b9a14747d9348 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:25 addons-207808 dockerd[1336]: time="2024-09-12T21:42:25.477309282Z" level=info msg="ignoring event" container=90b12721628d414d615c930f90b560e234d24af0ce198bf3e857768ded6f68a1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:29 addons-207808 cri-dockerd[1600]: time="2024-09-12T21:42:29Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b6e7db378c3cd3f2e8eed4ae53adafc8d2e09d5b28e3548c239883be504d15c8/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 12 21:42:29 addons-207808 dockerd[1336]: time="2024-09-12T21:42:29.769894325Z" level=info msg="ignoring event" container=f516a09ad09165c94a45d23bdb0a5e9fde77a4dbec47ae5fde8af52ae1642f50 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:29 addons-207808 dockerd[1336]: time="2024-09-12T21:42:29.892554753Z" level=info msg="ignoring event" container=abdbea8991fa2d3b8d4dbd4ba546e7f268a8b880a0c5296d869191ab5a46d5c0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:31 addons-207808 dockerd[1336]: time="2024-09-12T21:42:31.881886899Z" level=info msg="Container failed to exit within 30s of signal 15 - using the force" container=5f3b0a50b0d1d081bd93e083f67a509be98935e222df3105b9ec18d61793c6eb
Sep 12 21:42:31 addons-207808 dockerd[1336]: time="2024-09-12T21:42:31.902803627Z" level=info msg="ignoring event" container=5f3b0a50b0d1d081bd93e083f67a509be98935e222df3105b9ec18d61793c6eb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:32 addons-207808 dockerd[1336]: time="2024-09-12T21:42:32.024801355Z" level=info msg="ignoring event" container=faad0b94ce6fd08c1404da42fa4a1d9481c764f556b9757a64b282e31887f092 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:32 addons-207808 cri-dockerd[1600]: time="2024-09-12T21:42:32Z" level=info msg="Stop pulling image docker.io/nginx:alpine: Status: Downloaded newer image for nginx:alpine"
Sep 12 21:42:34 addons-207808 dockerd[1336]: time="2024-09-12T21:42:34.052977877Z" level=info msg="Attempting next endpoint for pull after error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed"
Sep 12 21:42:34 addons-207808 dockerd[1336]: time="2024-09-12T21:42:34.055137119Z" level=error msg="Handler for POST /v1.43/images/create returned error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed"
Sep 12 21:42:41 addons-207808 cri-dockerd[1600]: time="2024-09-12T21:42:41Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/021b0712666e2706de5ceba9945caa927e89c67b2e46a6342a45a3bb2ca68abc/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 12 21:42:41 addons-207808 dockerd[1336]: time="2024-09-12T21:42:41.309649014Z" level=info msg="ignoring event" container=310f6cda8651a5aee0c264a1c549339bf23995fc7c5d6de645d9fa9104114f40 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:41 addons-207808 dockerd[1336]: time="2024-09-12T21:42:41.356039684Z" level=info msg="ignoring event" container=85ba7338cac4953a48d8cc78e30f27f60ae1d576a7b59f695add8592b0a24286 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:42 addons-207808 cri-dockerd[1600]: time="2024-09-12T21:42:42Z" level=info msg="Stop pulling image docker.io/kicbase/echo-server:1.0: Status: Downloaded newer image for kicbase/echo-server:1.0"
Sep 12 21:42:45 addons-207808 dockerd[1336]: time="2024-09-12T21:42:45.959507118Z" level=info msg="Container failed to exit within 2s of signal 15 - using the force" container=e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f
Sep 12 21:42:46 addons-207808 dockerd[1336]: time="2024-09-12T21:42:46.020511789Z" level=info msg="ignoring event" container=e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:46 addons-207808 dockerd[1336]: time="2024-09-12T21:42:46.181863222Z" level=info msg="ignoring event" container=72fec6cdaefabc07f057ae112d72525688238a33fbbbd4719aae57249b6ca97e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:50 addons-207808 dockerd[1336]: time="2024-09-12T21:42:50.087674267Z" level=info msg="ignoring event" container=3b12d368af1ba6e620fed65557d117417791262f1f1af8bd50ce5e877eb5b1f1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:50 addons-207808 dockerd[1336]: time="2024-09-12T21:42:50.549503774Z" level=info msg="ignoring event" container=67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:50 addons-207808 dockerd[1336]: time="2024-09-12T21:42:50.607642926Z" level=info msg="ignoring event" container=7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:50 addons-207808 dockerd[1336]: time="2024-09-12T21:42:50.679677069Z" level=info msg="ignoring event" container=3be73be1866a200295385c874a5d80efdf5f89162f040328319efcc382140840 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 12 21:42:50 addons-207808 dockerd[1336]: time="2024-09-12T21:42:50.768454694Z" level=info msg="ignoring event" container=78a5a06d4a04940896365e6b7e1e5f470f743c014d0d5a5aca3be14ebb623799 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
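The two dockerd entries at 21:42:34 show the daemon's anonymous HEAD request for gcr.io/k8s-minikube/busybox (tag latest) failing with "unauthorized: authentication failed", which lines up with the ImagePullBackOff for the registry-test pod reported by the kubelet further down. A sketch of reproducing the pull from inside the node, assuming it is still reachable over minikube ssh:
out/minikube-linux-amd64 -p addons-207808 ssh -- docker pull gcr.io/k8s-minikube/busybox:latest
# a second failure here points at registry-side auth or rate limiting rather than anything cluster-specific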
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
aac269b22493d kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 9 seconds ago Running hello-world-app 0 021b0712666e2 hello-world-app-55bf9c44b4-mjcpw
81bd03845048c nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 19 seconds ago Running nginx 0 b6e7db378c3cd nginx
07a6fb1324f97 a416a98b71e22 50 seconds ago Exited helper-pod 0 de26f9f5bcc4b helper-pod-delete-pvc-b1ba2409-c488-4cdf-b0b8-4d252d606c73
56a330f7d94aa gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 81f5fd67c17a0 gcp-auth-89d5ffd79-mhh85
dffc69edded62 ce263a8653f9c 11 minutes ago Exited patch 1 9d4dc462cdc00 ingress-nginx-admission-patch-ns57n
29450d646ef4a registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 96574fa220383 ingress-nginx-admission-create-t9v69
7a19ac2f77504 gcr.io/k8s-minikube/kube-registry-proxy@sha256:b3fa0b2df8737fdb85ad5918a7e2652527463e357afff83a5e5bb966bcedc367 12 minutes ago Exited registry-proxy 0 78a5a06d4a049 registry-proxy-fjxbz
67d8edc63cc40 registry@sha256:ac0192b549007e22998eb74e8d8488dcfe70f1489520c3b144a6047ac5efbe90 12 minutes ago Exited registry 0 3be73be1866a2 registry-66c9cd494c-mdbsb
fa068715e0b78 6e38f40d628db 12 minutes ago Running storage-provisioner 0 aa2f447a14793 storage-provisioner
3b169979e5097 c69fa2e9cbf5f 12 minutes ago Running coredns 0 dd0b54a1e4b9f coredns-7c65d6cfc9-nqb66
659c75feb9a77 60c005f310ff3 12 minutes ago Running kube-proxy 0 8f2dd52e26f96 kube-proxy-2xmvv
8979ec8fc868f 9aa1fad941575 12 minutes ago Running kube-scheduler 0 702e999f2e086 kube-scheduler-addons-207808
c068506a2ce86 2e96e5913fc06 12 minutes ago Running etcd 0 15f6418116d27 etcd-addons-207808
7608d9143ec64 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 6649c16b423ab kube-controller-manager-addons-207808
1bac8a599bc4b 6bab7719df100 12 minutes ago Running kube-apiserver 0 08079b0f3308d kube-apiserver-addons-207808
==> coredns [3b169979e509] <==
[INFO] 10.244.0.22:45721 - 29979 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.00311285s
[INFO] 10.244.0.22:51640 - 25692 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005193991s
[INFO] 10.244.0.22:46983 - 26825 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005229929s
[INFO] 10.244.0.22:56173 - 2295 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.0058611s
[INFO] 10.244.0.22:46102 - 51476 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005440425s
[INFO] 10.244.0.22:40390 - 58896 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005441496s
[INFO] 10.244.0.22:45721 - 22303 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005506211s
[INFO] 10.244.0.22:55982 - 25996 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005636843s
[INFO] 10.244.0.22:44762 - 13853 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.00568787s
[INFO] 10.244.0.22:51640 - 41022 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003879842s
[INFO] 10.244.0.22:44762 - 4340 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003509116s
[INFO] 10.244.0.22:45721 - 49477 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003730443s
[INFO] 10.244.0.22:40390 - 22932 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003755494s
[INFO] 10.244.0.22:46102 - 35464 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003861806s
[INFO] 10.244.0.22:56173 - 4643 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003804222s
[INFO] 10.244.0.22:55982 - 1235 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003749399s
[INFO] 10.244.0.22:46983 - 9553 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004133971s
[INFO] 10.244.0.22:44762 - 51125 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000063336s
[INFO] 10.244.0.22:45721 - 10639 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000052434s
[INFO] 10.244.0.22:40390 - 27164 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000060738s
[INFO] 10.244.0.22:56173 - 40027 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00012687s
[INFO] 10.244.0.22:46102 - 20721 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000206803s
[INFO] 10.244.0.22:51640 - 26586 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000236518s
[INFO] 10.244.0.22:46983 - 61600 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00016543s
[INFO] 10.244.0.22:55982 - 47647 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000191203s
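The NXDOMAIN lines above are the expected side effect of the ndots:5 search path written into the pod's resolv.conf (see the cri-dockerd re-write entries in the Docker section): hello-world-app.default.svc.cluster.local has fewer than five dots, so the resolver appends each search suffix (hence the queries against c.k8s-minikube.internal and google.internal) before trying the name as given, which is what the final NOERROR answers are. A sketch of querying the name as a fully qualified name, with a trailing dot so the search list is skipped, using a stock busybox image (an assumption; any image with nslookup works):
kubectl --context addons-207808 run dns-probe --rm -it --restart=Never --image=busybox -- nslookup hello-world-app.default.svc.cluster.local.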
==> describe nodes <==
Name: addons-207808
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-207808
kubernetes.io/os=linux
minikube.k8s.io/commit=f6bc674a17941874d4e5b792b09c1791d30622b8
minikube.k8s.io/name=addons-207808
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_12T21_30_02_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-207808
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 12 Sep 2024 21:29:59 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-207808
AcquireTime: <unset>
RenewTime: Thu, 12 Sep 2024 21:42:46 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 12 Sep 2024 21:42:36 +0000 Thu, 12 Sep 2024 21:29:58 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 12 Sep 2024 21:42:36 +0000 Thu, 12 Sep 2024 21:29:58 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 12 Sep 2024 21:42:36 +0000 Thu, 12 Sep 2024 21:29:58 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 12 Sep 2024 21:42:36 +0000 Thu, 12 Sep 2024 21:29:59 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-207808
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
System Info:
Machine ID: 1003a0e88b5347198be46e2083b504f7
System UUID: 69c6a5ac-901a-4554-9494-158d4279ef9e
Boot ID: 178756ce-17ec-4b96-b240-8a8b9997ee1b
Kernel Version: 5.15.0-1067-gcp
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.2.1
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9m15s
default hello-world-app-55bf9c44b4-mjcpw 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22s
gcp-auth gcp-auth-89d5ffd79-mhh85 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11m
kube-system coredns-7c65d6cfc9-nqb66 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 12m
kube-system etcd-addons-207808 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 12m
kube-system kube-apiserver-addons-207808 250m (3%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-controller-manager-addons-207808 200m (2%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-proxy-2xmvv 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-scheduler-addons-207808 100m (1%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 0 (0%)
memory 170Mi (0%) 170Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 12m kube-proxy
Normal Starting 12m kubelet Starting kubelet.
Warning CgroupV1 12m kubelet Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal NodeAllocatableEnforced 12m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 12m kubelet Node addons-207808 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 12m kubelet Node addons-207808 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 12m kubelet Node addons-207808 status is now: NodeHasSufficientPID
Normal RegisteredNode 12m node-controller Node addons-207808 event: Registered Node addons-207808 in Controller
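The CgroupV1 warning in the events above notes that cgroup v1 support is in maintenance mode. A sketch of checking which cgroup version the kicbase node is actually running (stat -fc %T prints the filesystem type backing /sys/fs/cgroup: cgroup2fs on a v2 host, tmpfs on v1):
out/minikube-linux-amd64 -p addons-207808 ssh -- stat -fc %T /sys/fs/cgroup/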
==> dmesg <==
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 76 42 d8 3f c2 55 08 06
[ +6.106396] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 32 b8 43 61 01 1a 08 06
[ +0.088573] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 5e cb 23 95 1a 12 08 06
[ +0.102869] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 92 02 6d 20 66 41 08 06
[ +10.410130] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff aa 1c 16 68 53 f6 08 06
[ +1.032910] IPv4: martian source 10.244.0.1 from 10.244.0.23, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 22 67 34 69 3d 1b 08 06
[Sep12 21:32] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff f2 7b 61 91 0f 62 08 06
[ +0.042796] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff ae 79 8e c9 48 49 08 06
[ +29.256272] IPv4: martian source 10.244.0.1 from 10.244.0.26, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 52 5a 94 2c 58 d2 08 06
[ +0.000427] IPv4: martian source 10.244.0.26 from 10.244.0.2, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff ca ab 1b 6a f4 9b 08 06
[Sep12 21:41] IPv4: martian source 10.244.0.1 from 10.244.0.29, on dev eth0
[ +0.000010] ll header: 00000000: ff ff ff ff ff ff f2 1c ad 3c 84 b4 08 06
[Sep12 21:42] IPv4: martian source 10.244.0.37 from 10.244.0.22, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff aa 1c 16 68 53 f6 08 06
[ +1.627498] IPv4: martian source 10.244.0.22 from 10.244.0.2, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff ca ab 1b 6a f4 9b 08 06
==> etcd [c068506a2ce8] <==
{"level":"info","ts":"2024-09-12T21:29:57.948350Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-12T21:29:57.948362Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-12T21:29:57.948376Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-12T21:29:57.949208Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-12T21:29:57.949742Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-12T21:29:57.949740Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-207808 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-12T21:29:57.949767Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-12T21:29:57.950019Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-12T21:29:57.950026Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-12T21:29:57.950077Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-12T21:29:57.950107Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-12T21:29:57.950140Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-12T21:29:57.951036Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-12T21:29:57.951047Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-12T21:29:57.952238Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-12T21:29:57.952240Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"warn","ts":"2024-09-12T21:30:16.033998Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"102.596986ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/minions/addons-207808\" ","response":"range_response_count:1 size:4404"}
{"level":"info","ts":"2024-09-12T21:30:16.034082Z","caller":"traceutil/trace.go:171","msg":"trace[1030330109] range","detail":"{range_begin:/registry/minions/addons-207808; range_end:; response_count:1; response_revision:619; }","duration":"102.698267ms","start":"2024-09-12T21:30:15.931366Z","end":"2024-09-12T21:30:16.034064Z","steps":["trace[1030330109] 'range keys from in-memory index tree' (duration: 102.473609ms)"],"step_count":1}
{"level":"warn","ts":"2024-09-12T21:31:25.581068Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"136.304322ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-09-12T21:31:25.581140Z","caller":"traceutil/trace.go:171","msg":"trace[466196732] range","detail":"{range_begin:/registry/pods; range_end:; response_count:0; response_revision:1238; }","duration":"136.410983ms","start":"2024-09-12T21:31:25.444715Z","end":"2024-09-12T21:31:25.581126Z","steps":["trace[466196732] 'range keys from in-memory index tree' (duration: 136.247832ms)"],"step_count":1}
{"level":"info","ts":"2024-09-12T21:39:58.361661Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1870}
{"level":"info","ts":"2024-09-12T21:39:58.385652Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1870,"took":"23.470892ms","hash":984981064,"current-db-size-bytes":8781824,"current-db-size":"8.8 MB","current-db-size-in-use-bytes":4980736,"current-db-size-in-use":"5.0 MB"}
{"level":"info","ts":"2024-09-12T21:39:58.385701Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":984981064,"revision":1870,"compact-revision":-1}
{"level":"info","ts":"2024-09-12T21:42:05.754232Z","caller":"traceutil/trace.go:171","msg":"trace[696321980] transaction","detail":"{read_only:false; response_revision:2680; number_of_response:1; }","duration":"113.566169ms","start":"2024-09-12T21:42:05.640642Z","end":"2024-09-12T21:42:05.754208Z","steps":["trace[696321980] 'process raft request' (duration: 50.149388ms)","trace[696321980] 'compare' (duration: 63.318473ms)"],"step_count":2}
{"level":"info","ts":"2024-09-12T21:42:05.754357Z","caller":"traceutil/trace.go:171","msg":"trace[407531068] transaction","detail":"{read_only:false; response_revision:2681; number_of_response:1; }","duration":"113.621462ms","start":"2024-09-12T21:42:05.640720Z","end":"2024-09-12T21:42:05.754341Z","steps":["trace[407531068] 'process raft request' (duration: 113.557768ms)"],"step_count":1}
==> gcp-auth [56a330f7d94a] <==
2024/09/12 21:33:36 Ready to write response ...
2024/09/12 21:41:44 Ready to marshal response ...
2024/09/12 21:41:44 Ready to write response ...
2024/09/12 21:41:45 Ready to marshal response ...
2024/09/12 21:41:45 Ready to write response ...
2024/09/12 21:41:49 Ready to marshal response ...
2024/09/12 21:41:49 Ready to write response ...
2024/09/12 21:41:50 Ready to marshal response ...
2024/09/12 21:41:50 Ready to write response ...
2024/09/12 21:41:50 Ready to marshal response ...
2024/09/12 21:41:50 Ready to write response ...
2024/09/12 21:42:01 Ready to marshal response ...
2024/09/12 21:42:01 Ready to write response ...
2024/09/12 21:42:02 Ready to marshal response ...
2024/09/12 21:42:02 Ready to write response ...
2024/09/12 21:42:02 Ready to marshal response ...
2024/09/12 21:42:02 Ready to write response ...
2024/09/12 21:42:02 Ready to marshal response ...
2024/09/12 21:42:02 Ready to write response ...
2024/09/12 21:42:06 Ready to marshal response ...
2024/09/12 21:42:06 Ready to write response ...
2024/09/12 21:42:29 Ready to marshal response ...
2024/09/12 21:42:29 Ready to write response ...
2024/09/12 21:42:40 Ready to marshal response ...
2024/09/12 21:42:40 Ready to write response ...
==> kernel <==
21:42:51 up 25 min, 0 users, load average: 0.44, 0.36, 0.28
Linux addons-207808 5.15.0-1067-gcp #75~20.04.1-Ubuntu SMP Wed Aug 7 20:43:22 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kube-apiserver [1bac8a599bc4] <==
W0912 21:33:28.345638 1 cacher.go:171] Terminating all watchers from cacher queues.scheduling.volcano.sh
W0912 21:33:28.464543 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0912 21:33:28.843497 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0912 21:33:29.151139 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0912 21:41:55.209924 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0912 21:42:02.355345 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.110.33.1"}
E0912 21:42:17.250742 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
I0912 21:42:22.994417 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0912 21:42:22.994472 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0912 21:42:23.008396 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0912 21:42:23.008444 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0912 21:42:23.009381 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0912 21:42:23.009427 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0912 21:42:23.019779 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0912 21:42:23.019814 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0912 21:42:23.045599 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0912 21:42:23.045642 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0912 21:42:24.009949 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0912 21:42:24.046072 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0912 21:42:24.049826 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0912 21:42:25.391575 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0912 21:42:26.448801 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0912 21:42:29.047845 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0912 21:42:29.214076 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.104.134.100"}
I0912 21:42:40.689186 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.111.212.56"}
==> kube-controller-manager [7608d9143ec6] <==
I0912 21:42:36.848097 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0912 21:42:36.848131 1 shared_informer.go:320] Caches are synced for resource quota
I0912 21:42:37.169205 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0912 21:42:37.169244 1 shared_informer.go:320] Caches are synced for garbage collector
I0912 21:42:40.572632 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="10.610349ms"
I0912 21:42:40.576910 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="4.229949ms"
I0912 21:42:40.576983 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="41.409µs"
I0912 21:42:40.581057 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="55.573µs"
W0912 21:42:41.908489 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0912 21:42:41.908529 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0912 21:42:42.937561 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-bc57996ff" duration="9.305µs"
I0912 21:42:42.937637 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create" delay="0s"
I0912 21:42:42.941314 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch" delay="0s"
I0912 21:42:43.436926 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="5.320895ms"
I0912 21:42:43.437012 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="46.484µs"
W0912 21:42:43.703684 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0912 21:42:43.703727 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0912 21:42:44.188019 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0912 21:42:44.188056 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0912 21:42:45.688457 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0912 21:42:45.688497 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0912 21:42:47.838843 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0912 21:42:47.838888 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0912 21:42:49.449098 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="local-path-storage"
I0912 21:42:50.504164 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="6.122µs"
==> kube-proxy [659c75feb9a7] <==
I0912 21:30:07.674840 1 server_linux.go:66] "Using iptables proxy"
I0912 21:30:07.798673 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0912 21:30:07.798752 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0912 21:30:07.854328 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0912 21:30:07.854408 1 server_linux.go:169] "Using iptables Proxier"
I0912 21:30:07.857134 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0912 21:30:07.857672 1 server.go:483] "Version info" version="v1.31.1"
I0912 21:30:07.857702 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0912 21:30:07.859941 1 config.go:199] "Starting service config controller"
I0912 21:30:07.859958 1 shared_informer.go:313] Waiting for caches to sync for service config
I0912 21:30:07.859981 1 config.go:105] "Starting endpoint slice config controller"
I0912 21:30:07.859986 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0912 21:30:07.860026 1 config.go:328] "Starting node config controller"
I0912 21:30:07.860037 1 shared_informer.go:313] Waiting for caches to sync for node config
I0912 21:30:07.960502 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0912 21:30:07.960585 1 shared_informer.go:320] Caches are synced for service config
I0912 21:30:07.964054 1 shared_informer.go:320] Caches are synced for node config
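The startup warning above (nodePortAddresses unset) suggests filtering loopback addresses with --nodeport-addresses primary. In a kubeadm-managed cluster such as this one the setting normally lives in the kube-proxy ConfigMap rather than on the command line; a sketch of where it would go, assuming the default kubeadm layout (a config.conf key inside the kube-system/kube-proxy ConfigMap):
kubectl --context addons-207808 -n kube-system edit configmap kube-proxy
# then, in the embedded KubeProxyConfiguration:
#   nodePortAddresses: ["primary"]
# and restart the kube-proxy pod so it re-reads the config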
==> kube-scheduler [8979ec8fc868] <==
W0912 21:29:59.738838 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0912 21:29:59.739414 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:29:59.739056 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0912 21:29:59.739476 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:29:59.738782 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0912 21:29:59.739538 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.550471 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0912 21:30:00.550507 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.570778 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0912 21:30:00.570814 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.617183 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0912 21:30:00.617232 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.664215 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0912 21:30:00.664256 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.668424 1 reflector.go:561] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0912 21:30:00.668455 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0912 21:30:00.727637 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0912 21:30:00.727686 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.748179 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0912 21:30:00.748225 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.810853 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0912 21:30:00.810900 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0912 21:30:00.820258 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0912 21:30:00.820303 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
I0912 21:30:03.835537 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 12 21:42:46 addons-207808 kubelet[2440]: E0912 21:42:46.479356 2440 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f" containerID="e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f"
Sep 12 21:42:46 addons-207808 kubelet[2440]: I0912 21:42:46.479417 2440 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f"} err="failed to get container status \"e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f\": rpc error: code = Unknown desc = Error response from daemon: No such container: e57e88a01fbdbd94e7819dce90ab02e75d11fa905be87d0653d02d2efd94258f"
Sep 12 21:42:46 addons-207808 kubelet[2440]: E0912 21:42:46.935858 2440 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-test\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox\\\"\"" pod="default/registry-test" podUID="ee4c74e8-2158-4d39-848e-8b76ed7122a8"
Sep 12 21:42:47 addons-207808 kubelet[2440]: I0912 21:42:47.942275 2440 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6102737-46d7-4392-a7cf-24f0e5cff364" path="/var/lib/kubelet/pods/b6102737-46d7-4392-a7cf-24f0e5cff364/volumes"
Sep 12 21:42:48 addons-207808 kubelet[2440]: I0912 21:42:48.934106 2440 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="kube-system/registry-proxy-fjxbz" secret="" err="secret \"gcp-auth\" not found"
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.245091 2440 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/ee4c74e8-2158-4d39-848e-8b76ed7122a8-gcp-creds\") pod \"ee4c74e8-2158-4d39-848e-8b76ed7122a8\" (UID: \"ee4c74e8-2158-4d39-848e-8b76ed7122a8\") "
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.245116 2440 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee4c74e8-2158-4d39-848e-8b76ed7122a8-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "ee4c74e8-2158-4d39-848e-8b76ed7122a8" (UID: "ee4c74e8-2158-4d39-848e-8b76ed7122a8"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.245158 2440 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzxn7\" (UniqueName: \"kubernetes.io/projected/ee4c74e8-2158-4d39-848e-8b76ed7122a8-kube-api-access-zzxn7\") pod \"ee4c74e8-2158-4d39-848e-8b76ed7122a8\" (UID: \"ee4c74e8-2158-4d39-848e-8b76ed7122a8\") "
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.246866 2440 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee4c74e8-2158-4d39-848e-8b76ed7122a8-kube-api-access-zzxn7" (OuterVolumeSpecName: "kube-api-access-zzxn7") pod "ee4c74e8-2158-4d39-848e-8b76ed7122a8" (UID: "ee4c74e8-2158-4d39-848e-8b76ed7122a8"). InnerVolumeSpecName "kube-api-access-zzxn7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.345964 2440 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/ee4c74e8-2158-4d39-848e-8b76ed7122a8-gcp-creds\") on node \"addons-207808\" DevicePath \"\""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.346009 2440 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-zzxn7\" (UniqueName: \"kubernetes.io/projected/ee4c74e8-2158-4d39-848e-8b76ed7122a8-kube-api-access-zzxn7\") on node \"addons-207808\" DevicePath \"\""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.849367 2440 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4jjx\" (UniqueName: \"kubernetes.io/projected/6646693e-e468-4f8c-a209-9f028e31da67-kube-api-access-k4jjx\") pod \"6646693e-e468-4f8c-a209-9f028e31da67\" (UID: \"6646693e-e468-4f8c-a209-9f028e31da67\") "
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.849461 2440 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcj9b\" (UniqueName: \"kubernetes.io/projected/6340cd55-7e16-4315-8b01-5e879a2b0d76-kube-api-access-hcj9b\") pod \"6340cd55-7e16-4315-8b01-5e879a2b0d76\" (UID: \"6340cd55-7e16-4315-8b01-5e879a2b0d76\") "
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.851179 2440 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6646693e-e468-4f8c-a209-9f028e31da67-kube-api-access-k4jjx" (OuterVolumeSpecName: "kube-api-access-k4jjx") pod "6646693e-e468-4f8c-a209-9f028e31da67" (UID: "6646693e-e468-4f8c-a209-9f028e31da67"). InnerVolumeSpecName "kube-api-access-k4jjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.851569 2440 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6340cd55-7e16-4315-8b01-5e879a2b0d76-kube-api-access-hcj9b" (OuterVolumeSpecName: "kube-api-access-hcj9b") pod "6340cd55-7e16-4315-8b01-5e879a2b0d76" (UID: "6340cd55-7e16-4315-8b01-5e879a2b0d76"). InnerVolumeSpecName "kube-api-access-hcj9b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.950481 2440 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-k4jjx\" (UniqueName: \"kubernetes.io/projected/6646693e-e468-4f8c-a209-9f028e31da67-kube-api-access-k4jjx\") on node \"addons-207808\" DevicePath \"\""
Sep 12 21:42:50 addons-207808 kubelet[2440]: I0912 21:42:50.950514 2440 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-hcj9b\" (UniqueName: \"kubernetes.io/projected/6340cd55-7e16-4315-8b01-5e879a2b0d76-kube-api-access-hcj9b\") on node \"addons-207808\" DevicePath \"\""
Sep 12 21:42:51 addons-207808 kubelet[2440]: I0912 21:42:51.528251 2440 scope.go:117] "RemoveContainer" containerID="7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402"
Sep 12 21:42:51 addons-207808 kubelet[2440]: I0912 21:42:51.545641 2440 scope.go:117] "RemoveContainer" containerID="7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402"
Sep 12 21:42:51 addons-207808 kubelet[2440]: E0912 21:42:51.546426 2440 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402" containerID="7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402"
Sep 12 21:42:51 addons-207808 kubelet[2440]: I0912 21:42:51.546457 2440 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402"} err="failed to get container status \"7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402\": rpc error: code = Unknown desc = Error response from daemon: No such container: 7a19ac2f77504a3fb6429ea0692fe0ffc3ecdd7809aa8bfe68b5df69bb727402"
Sep 12 21:42:51 addons-207808 kubelet[2440]: I0912 21:42:51.546489 2440 scope.go:117] "RemoveContainer" containerID="67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c"
Sep 12 21:42:51 addons-207808 kubelet[2440]: I0912 21:42:51.560722 2440 scope.go:117] "RemoveContainer" containerID="67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c"
Sep 12 21:42:51 addons-207808 kubelet[2440]: E0912 21:42:51.561504 2440 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c" containerID="67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c"
Sep 12 21:42:51 addons-207808 kubelet[2440]: I0912 21:42:51.561550 2440 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c"} err="failed to get container status \"67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c\": rpc error: code = Unknown desc = Error response from daemon: No such container: 67d8edc63cc4087ea595b825b3ea2af0672849eee4ba88641e5ce19407f5d55c"
==> storage-provisioner [fa068715e0b7] <==
I0912 21:30:16.338008 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0912 21:30:16.437525 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0912 21:30:16.437581 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0912 21:30:16.456027 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0912 21:30:16.456229 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-207808_daf20f72-9563-41c4-adbd-36b6caaf2374!
I0912 21:30:16.456289 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"680a853e-cde1-40d3-94d4-b86f3e7c4972", APIVersion:"v1", ResourceVersion:"646", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-207808_daf20f72-9563-41c4-adbd-36b6caaf2374 became leader
I0912 21:30:16.631423 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-207808_daf20f72-9563-41c4-adbd-36b6caaf2374!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-207808 -n addons-207808
helpers_test.go:261: (dbg) Run: kubectl --context addons-207808 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-207808 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-207808 describe pod busybox:
-- stdout --
Name:             busybox
Namespace:        default
Priority:         0
Service Account:  default
Node:             addons-207808/192.168.49.2
Start Time:       Thu, 12 Sep 2024 21:33:36 +0000
Labels:           integration-test=busybox
Annotations:      <none>
Status:           Pending
IP:               10.244.0.28
IPs:
  IP:  10.244.0.28
Containers:
  busybox:
    Container ID:
    Image:          gcr.io/k8s-minikube/busybox:1.28.4-glibc
    Image ID:
    Port:           <none>
    Host Port:      <none>
    Command:
      sleep
      3600
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:
      GOOGLE_APPLICATION_CREDENTIALS:  /google-app-creds.json
      PROJECT_ID:                      this_is_fake
      GCP_PROJECT:                     this_is_fake
      GCLOUD_PROJECT:                  this_is_fake
      GOOGLE_CLOUD_PROJECT:            this_is_fake
      CLOUDSDK_CORE_PROJECT:           this_is_fake
    Mounts:
      /google-app-creds.json from gcp-creds (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kz9f9 (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
  kube-api-access-kz9f9:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
  gcp-creds:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/minikube/google_application_credentials.json
    HostPathType:  File
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  9m16s                  default-scheduler  Successfully assigned default/busybox to addons-207808
  Normal   Pulling    7m59s (x4 over 9m15s)  kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
  Warning  Failed     7m58s (x4 over 9m15s)  kubelet            Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
  Warning  Failed     7m58s (x4 over 9m15s)  kubelet            Error: ErrImagePull
  Warning  Failed     7m34s (x6 over 9m15s)  kubelet            Error: ImagePullBackOff
  Normal   BackOff    4m4s (x21 over 9m15s)  kubelet            Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (73.39s)