=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:328: registry stabilized in 2.083819ms
addons_test.go:330: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-sswjh" [42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe] Running
addons_test.go:330: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.002711s
addons_test.go:333: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-w6x4v" [8964449b-425a-4614-aa7b-d6cc98a185c7] Running
addons_test.go:333: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.003081335s
addons_test.go:338: (dbg) Run: kubectl --context addons-071702 delete po -l run=registry-test --now
addons_test.go:343: (dbg) Run: kubectl --context addons-071702 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:343: (dbg) Non-zero exit: kubectl --context addons-071702 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.070783686s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:345: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-071702 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:349: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:357: (dbg) Run: out/minikube-linux-amd64 -p addons-071702 ip
2024/09/23 10:34:05 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:386: (dbg) Run: out/minikube-linux-amd64 -p addons-071702 addons disable registry --alsologtostderr -v=1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-071702
helpers_test.go:235: (dbg) docker inspect addons-071702:
-- stdout --
[
{
"Id": "8966179ecdfd2cc670eb136a4fc91620be24b5bc1984967e12bcafcacc397742",
"Created": "2024-09-23T10:21:08.61285066Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 12583,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-23T10:21:08.745508485Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:d94335c0cd164ddebb3c5158e317bcf6d2e08dc08f448d25251f425acb842829",
"ResolvConfPath": "/var/lib/docker/containers/8966179ecdfd2cc670eb136a4fc91620be24b5bc1984967e12bcafcacc397742/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/8966179ecdfd2cc670eb136a4fc91620be24b5bc1984967e12bcafcacc397742/hostname",
"HostsPath": "/var/lib/docker/containers/8966179ecdfd2cc670eb136a4fc91620be24b5bc1984967e12bcafcacc397742/hosts",
"LogPath": "/var/lib/docker/containers/8966179ecdfd2cc670eb136a4fc91620be24b5bc1984967e12bcafcacc397742/8966179ecdfd2cc670eb136a4fc91620be24b5bc1984967e12bcafcacc397742-json.log",
"Name": "/addons-071702",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"addons-071702:/var",
"/lib/modules:/lib/modules:ro"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-071702",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/588b74eec0c8446e03e984eb4a9fe9b0c59ab6a00da3f5b0e38ccf11992a439d-init/diff:/var/lib/docker/overlay2/8ca2de7d8b65e2bda8878f4a091fa97667b4eaea3c506fec5159a312eef51d3c/diff",
"MergedDir": "/var/lib/docker/overlay2/588b74eec0c8446e03e984eb4a9fe9b0c59ab6a00da3f5b0e38ccf11992a439d/merged",
"UpperDir": "/var/lib/docker/overlay2/588b74eec0c8446e03e984eb4a9fe9b0c59ab6a00da3f5b0e38ccf11992a439d/diff",
"WorkDir": "/var/lib/docker/overlay2/588b74eec0c8446e03e984eb4a9fe9b0c59ab6a00da3f5b0e38ccf11992a439d/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "volume",
"Name": "addons-071702",
"Source": "/var/lib/docker/volumes/addons-071702/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
},
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "addons-071702",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-071702",
"name.minikube.sigs.k8s.io": "addons-071702",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "9521a3bbbe15abd113d8b278ff9250adfe953503d9dbe0e939d74eee71fe8bdb",
"SandboxKey": "/var/run/docker/netns/9521a3bbbe15",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-071702": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "5b08ae3e0bfe730e82589f0f8972b14608d300a082908684ee88145be463a3d9",
"EndpointID": "9437440e9ebe63305f83fecb77205cfc1ee9f9a9a62037fee8d67e7161a1e372",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-071702",
"8966179ecdfd"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-071702 -n addons-071702
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-071702 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | --download-only -p | download-docker-381840 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | |
| | download-docker-381840 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p download-docker-381840 | download-docker-381840 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | 23 Sep 24 10:20 UTC |
| start | --download-only -p | binary-mirror-885431 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | |
| | binary-mirror-885431 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:41549 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-885431 | binary-mirror-885431 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | 23 Sep 24 10:20 UTC |
| addons | disable dashboard -p | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | |
| | addons-071702 | | | | | |
| addons | enable dashboard -p | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | |
| | addons-071702 | | | | | |
| start | -p addons-071702 --wait=true | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:20 UTC | 23 Sep 24 10:24 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:24 UTC | 23 Sep 24 10:24 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:32 UTC | 23 Sep 24 10:33 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| ssh | addons-071702 ssh cat | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | /opt/local-path-provisioner/pvc-1f21215f-8da6-4e9e-aa33-1db8504ddfb9_default_test-pvc/file1 | | | | | |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | -p addons-071702 | | | | | |
| addons | addons-071702 addons | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | -p addons-071702 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable cloud-spanner -p | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | addons-071702 | | | | | |
| addons | disable inspektor-gadget -p | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | addons-071702 | | | | | |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| ssh | addons-071702 ssh curl -s | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-071702 ip | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| addons | addons-071702 addons | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-071702 addons | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:33 UTC | 23 Sep 24 10:33 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-071702 ip | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:34 UTC | 23 Sep 24 10:34 UTC |
| addons | addons-071702 addons disable | addons-071702 | jenkins | v1.34.0 | 23 Sep 24 10:34 UTC | 23 Sep 24 10:34 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/23 10:20:45
Running on machine: ubuntu-20-agent-2
Binary: Built with gc go1.23.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0923 10:20:45.379792 11849 out.go:345] Setting OutFile to fd 1 ...
I0923 10:20:45.379898 11849 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0923 10:20:45.379908 11849 out.go:358] Setting ErrFile to fd 2...
I0923 10:20:45.379915 11849 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0923 10:20:45.380088 11849 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19689-3716/.minikube/bin
I0923 10:20:45.380668 11849 out.go:352] Setting JSON to false
I0923 10:20:45.381461 11849 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent-2","uptime":194,"bootTime":1727086651,"procs":178,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1069-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0923 10:20:45.381516 11849 start.go:139] virtualization: kvm guest
I0923 10:20:45.383461 11849 out.go:177] * [addons-071702] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0923 10:20:45.384765 11849 out.go:177] - MINIKUBE_LOCATION=19689
I0923 10:20:45.384768 11849 notify.go:220] Checking for updates...
I0923 10:20:45.386044 11849 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0923 10:20:45.387372 11849 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19689-3716/kubeconfig
I0923 10:20:45.388643 11849 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19689-3716/.minikube
I0923 10:20:45.389715 11849 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0923 10:20:45.390753 11849 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0923 10:20:45.392010 11849 driver.go:394] Setting default libvirt URI to qemu:///system
I0923 10:20:45.413497 11849 docker.go:123] docker version: linux-27.3.1:Docker Engine - Community
I0923 10:20:45.413571 11849 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0923 10:20:45.461916 11849 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-23 10:20:45.452964694 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1069-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-2 Labels:[] ExperimentalBuild:false ServerVersion:27.3.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bri
dge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.7] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0923 10:20:45.462013 11849 docker.go:318] overlay module found
I0923 10:20:45.463742 11849 out.go:177] * Using the docker driver based on user configuration
I0923 10:20:45.465001 11849 start.go:297] selected driver: docker
I0923 10:20:45.465018 11849 start.go:901] validating driver "docker" against <nil>
I0923 10:20:45.465031 11849 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0923 10:20:45.465784 11849 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0923 10:20:45.509849 11849 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-23 10:20:45.501529913 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1069-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x
86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647935488 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent-2 Labels:[] ExperimentalBuild:false ServerVersion:27.3.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bri
dge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.7] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0923 10:20:45.510006 11849 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0923 10:20:45.510229 11849 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0923 10:20:45.512098 11849 out.go:177] * Using Docker driver with root privileges
I0923 10:20:45.513528 11849 cni.go:84] Creating CNI manager for ""
I0923 10:20:45.513585 11849 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0923 10:20:45.513596 11849 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0923 10:20:45.513653 11849 start.go:340] cluster config:
{Name:addons-071702 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-071702 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime
:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock:
SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0923 10:20:45.515050 11849 out.go:177] * Starting "addons-071702" primary control-plane node in "addons-071702" cluster
I0923 10:20:45.516260 11849 cache.go:121] Beginning downloading kic base image for docker with docker
I0923 10:20:45.517542 11849 out.go:177] * Pulling base image v0.0.45-1726784731-19672 ...
I0923 10:20:45.518872 11849 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0923 10:20:45.518906 11849 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19689-3716/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0923 10:20:45.518914 11849 cache.go:56] Caching tarball of preloaded images
I0923 10:20:45.518973 11849 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local docker daemon
I0923 10:20:45.518993 11849 preload.go:172] Found /home/jenkins/minikube-integration/19689-3716/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0923 10:20:45.519004 11849 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0923 10:20:45.519331 11849 profile.go:143] Saving config to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/config.json ...
I0923 10:20:45.519363 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/config.json: {Name:mk608206dd87a06e8de5d5ff517c3808c2af70c1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:20:45.534568 11849 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed to local cache
I0923 10:20:45.534663 11849 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local cache directory
I0923 10:20:45.534707 11849 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local cache directory, skipping pull
I0923 10:20:45.534714 11849 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed exists in cache, skipping pull
I0923 10:20:45.534725 11849 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed as a tarball
I0923 10:20:45.534733 11849 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed from local cache
I0923 10:20:57.266177 11849 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed from cached tarball
I0923 10:20:57.266209 11849 cache.go:194] Successfully downloaded all kic artifacts
I0923 10:20:57.266256 11849 start.go:360] acquireMachinesLock for addons-071702: {Name:mk685407b574de814a7d843dc9648ed76ce90d19 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0923 10:20:57.266360 11849 start.go:364] duration metric: took 81.327µs to acquireMachinesLock for "addons-071702"
I0923 10:20:57.266384 11849 start.go:93] Provisioning new machine with config: &{Name:addons-071702 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-071702 Namespace:default APIServerHAVIP: APIServerName:min
ikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false Cust
omQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0923 10:20:57.266501 11849 start.go:125] createHost starting for "" (driver="docker")
I0923 10:20:57.268448 11849 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0923 10:20:57.268719 11849 start.go:159] libmachine.API.Create for "addons-071702" (driver="docker")
I0923 10:20:57.268748 11849 client.go:168] LocalClient.Create starting
I0923 10:20:57.268888 11849 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca.pem
I0923 10:20:57.348092 11849 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/cert.pem
I0923 10:20:57.416102 11849 cli_runner.go:164] Run: docker network inspect addons-071702 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0923 10:20:57.431337 11849 cli_runner.go:211] docker network inspect addons-071702 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0923 10:20:57.431398 11849 network_create.go:284] running [docker network inspect addons-071702] to gather additional debugging logs...
I0923 10:20:57.431415 11849 cli_runner.go:164] Run: docker network inspect addons-071702
W0923 10:20:57.446403 11849 cli_runner.go:211] docker network inspect addons-071702 returned with exit code 1
I0923 10:20:57.446435 11849 network_create.go:287] error running [docker network inspect addons-071702]: docker network inspect addons-071702: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-071702 not found
I0923 10:20:57.446446 11849 network_create.go:289] output of [docker network inspect addons-071702]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-071702 not found
** /stderr **
I0923 10:20:57.446514 11849 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0923 10:20:57.461459 11849 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001ef3ee0}
I0923 10:20:57.461498 11849 network_create.go:124] attempt to create docker network addons-071702 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0923 10:20:57.461535 11849 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-071702 addons-071702
I0923 10:20:57.520570 11849 network_create.go:108] docker network addons-071702 192.168.49.0/24 created
I0923 10:20:57.520601 11849 kic.go:121] calculated static IP "192.168.49.2" for the "addons-071702" container
I0923 10:20:57.520672 11849 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0923 10:20:57.534541 11849 cli_runner.go:164] Run: docker volume create addons-071702 --label name.minikube.sigs.k8s.io=addons-071702 --label created_by.minikube.sigs.k8s.io=true
I0923 10:20:57.550415 11849 oci.go:103] Successfully created a docker volume addons-071702
I0923 10:20:57.550483 11849 cli_runner.go:164] Run: docker run --rm --name addons-071702-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-071702 --entrypoint /usr/bin/test -v addons-071702:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -d /var/lib
I0923 10:21:04.731475 11849 cli_runner.go:217] Completed: docker run --rm --name addons-071702-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-071702 --entrypoint /usr/bin/test -v addons-071702:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -d /var/lib: (7.180951038s)
I0923 10:21:04.731504 11849 oci.go:107] Successfully prepared a docker volume addons-071702
I0923 10:21:04.731528 11849 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0923 10:21:04.731546 11849 kic.go:194] Starting extracting preloaded images to volume ...
I0923 10:21:04.731619 11849 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19689-3716/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-071702:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -I lz4 -xf /preloaded.tar -C /extractDir
I0923 10:21:08.556706 11849 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19689-3716/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-071702:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -I lz4 -xf /preloaded.tar -C /extractDir: (3.825051884s)
I0923 10:21:08.556734 11849 kic.go:203] duration metric: took 3.82518497s to extract preloaded images to volume ...
W0923 10:21:08.556839 11849 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0923 10:21:08.556927 11849 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0923 10:21:08.599105 11849 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-071702 --name addons-071702 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-071702 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-071702 --network addons-071702 --ip 192.168.49.2 --volume addons-071702:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed
I0923 10:21:08.901402 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Running}}
I0923 10:21:08.919284 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:08.937298 11849 cli_runner.go:164] Run: docker exec addons-071702 stat /var/lib/dpkg/alternatives/iptables
I0923 10:21:08.976849 11849 oci.go:144] the created container "addons-071702" has a running status.
I0923 10:21:08.976882 11849 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa...
I0923 10:21:09.142646 11849 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0923 10:21:09.162592 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:09.180564 11849 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0923 10:21:09.180587 11849 kic_runner.go:114] Args: [docker exec --privileged addons-071702 chown docker:docker /home/docker/.ssh/authorized_keys]
I0923 10:21:09.256971 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:09.280370 11849 machine.go:93] provisionDockerMachine start ...
I0923 10:21:09.280469 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:09.297385 11849 main.go:141] libmachine: Using SSH client type: native
I0923 10:21:09.297572 11849 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 10:21:09.297593 11849 main.go:141] libmachine: About to run SSH command:
hostname
I0923 10:21:09.529772 11849 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-071702
I0923 10:21:09.529797 11849 ubuntu.go:169] provisioning hostname "addons-071702"
I0923 10:21:09.529846 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:09.546989 11849 main.go:141] libmachine: Using SSH client type: native
I0923 10:21:09.547157 11849 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 10:21:09.547179 11849 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-071702 && echo "addons-071702" | sudo tee /etc/hostname
I0923 10:21:09.683911 11849 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-071702
I0923 10:21:09.683973 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:09.699615 11849 main.go:141] libmachine: Using SSH client type: native
I0923 10:21:09.699819 11849 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 10:21:09.699843 11849 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-071702' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-071702/g' /etc/hosts;
else
echo '127.0.1.1 addons-071702' | sudo tee -a /etc/hosts;
fi
fi
I0923 10:21:09.826432 11849 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0923 10:21:09.826458 11849 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19689-3716/.minikube CaCertPath:/home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19689-3716/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19689-3716/.minikube}
I0923 10:21:09.826478 11849 ubuntu.go:177] setting up certificates
I0923 10:21:09.826489 11849 provision.go:84] configureAuth start
I0923 10:21:09.826549 11849 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-071702
I0923 10:21:09.841763 11849 provision.go:143] copyHostCerts
I0923 10:21:09.841825 11849 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19689-3716/.minikube/ca.pem (1082 bytes)
I0923 10:21:09.841935 11849 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19689-3716/.minikube/cert.pem (1123 bytes)
I0923 10:21:09.841991 11849 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19689-3716/.minikube/key.pem (1675 bytes)
I0923 10:21:09.842044 11849 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19689-3716/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca-key.pem org=jenkins.addons-071702 san=[127.0.0.1 192.168.49.2 addons-071702 localhost minikube]
I0923 10:21:10.034874 11849 provision.go:177] copyRemoteCerts
I0923 10:21:10.034933 11849 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0923 10:21:10.034967 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:10.051434 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:10.142459 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0923 10:21:10.162896 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0923 10:21:10.183128 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1679 bytes)
I0923 10:21:10.203345 11849 provision.go:87] duration metric: took 376.846086ms to configureAuth
I0923 10:21:10.203367 11849 ubuntu.go:193] setting minikube options for container-runtime
I0923 10:21:10.203514 11849 config.go:182] Loaded profile config "addons-071702": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0923 10:21:10.203558 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:10.219624 11849 main.go:141] libmachine: Using SSH client type: native
I0923 10:21:10.219809 11849 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 10:21:10.219825 11849 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0923 10:21:10.346611 11849 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0923 10:21:10.346637 11849 ubuntu.go:71] root file system type: overlay
I0923 10:21:10.346768 11849 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0923 10:21:10.346841 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:10.363783 11849 main.go:141] libmachine: Using SSH client type: native
I0923 10:21:10.363956 11849 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 10:21:10.364013 11849 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0923 10:21:10.500312 11849 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0923 10:21:10.500413 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:10.516123 11849 main.go:141] libmachine: Using SSH client type: native
I0923 10:21:10.516286 11849 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x864a40] 0x867720 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0923 10:21:10.516304 11849 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0923 10:21:11.173964 11849 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-19 14:24:32.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-23 10:21:10.496883873 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0923 10:21:11.173996 11849 machine.go:96] duration metric: took 1.89359565s to provisionDockerMachine
I0923 10:21:11.174006 11849 client.go:171] duration metric: took 13.905248575s to LocalClient.Create
I0923 10:21:11.174025 11849 start.go:167] duration metric: took 13.905305599s to libmachine.API.Create "addons-071702"
I0923 10:21:11.174034 11849 start.go:293] postStartSetup for "addons-071702" (driver="docker")
I0923 10:21:11.174049 11849 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0923 10:21:11.174099 11849 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0923 10:21:11.174131 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:11.190914 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:11.282838 11849 ssh_runner.go:195] Run: cat /etc/os-release
I0923 10:21:11.285665 11849 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0923 10:21:11.285692 11849 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0923 10:21:11.285700 11849 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0923 10:21:11.285707 11849 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0923 10:21:11.285717 11849 filesync.go:126] Scanning /home/jenkins/minikube-integration/19689-3716/.minikube/addons for local assets ...
I0923 10:21:11.285777 11849 filesync.go:126] Scanning /home/jenkins/minikube-integration/19689-3716/.minikube/files for local assets ...
I0923 10:21:11.285807 11849 start.go:296] duration metric: took 111.766416ms for postStartSetup
I0923 10:21:11.286130 11849 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-071702
I0923 10:21:11.301750 11849 profile.go:143] Saving config to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/config.json ...
I0923 10:21:11.302043 11849 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0923 10:21:11.302094 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:11.317532 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:11.403025 11849 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0923 10:21:11.406669 11849 start.go:128] duration metric: took 14.140131943s to createHost
I0923 10:21:11.406702 11849 start.go:83] releasing machines lock for "addons-071702", held for 14.140329172s
I0923 10:21:11.406766 11849 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-071702
I0923 10:21:11.422107 11849 ssh_runner.go:195] Run: cat /version.json
I0923 10:21:11.422144 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:11.422202 11849 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0923 10:21:11.422257 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:11.439064 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:11.440405 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:11.526000 11849 ssh_runner.go:195] Run: systemctl --version
I0923 10:21:11.595893 11849 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0923 10:21:11.599940 11849 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0923 10:21:11.620610 11849 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0923 10:21:11.620684 11849 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0923 10:21:11.643988 11849 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0923 10:21:11.644015 11849 start.go:495] detecting cgroup driver to use...
I0923 10:21:11.644048 11849 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0923 10:21:11.644153 11849 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0923 10:21:11.657421 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0923 10:21:11.665590 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0923 10:21:11.673416 11849 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0923 10:21:11.673466 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0923 10:21:11.681235 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0923 10:21:11.688809 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0923 10:21:11.696405 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0923 10:21:11.703979 11849 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0923 10:21:11.711187 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0923 10:21:11.718827 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0923 10:21:11.726625 11849 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0923 10:21:11.734392 11849 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0923 10:21:11.741107 11849 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0923 10:21:11.741160 11849 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0923 10:21:11.752971 11849 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0923 10:21:11.760136 11849 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 10:21:11.834146 11849 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0923 10:21:11.900360 11849 start.go:495] detecting cgroup driver to use...
I0923 10:21:11.900407 11849 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0923 10:21:11.900454 11849 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0923 10:21:11.910698 11849 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0923 10:21:11.910756 11849 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0923 10:21:11.921463 11849 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0923 10:21:11.935676 11849 ssh_runner.go:195] Run: which cri-dockerd
I0923 10:21:11.938624 11849 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0923 10:21:11.946474 11849 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0923 10:21:11.961963 11849 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0923 10:21:12.032080 11849 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0923 10:21:12.112795 11849 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0923 10:21:12.112946 11849 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0923 10:21:12.128807 11849 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 10:21:12.218962 11849 ssh_runner.go:195] Run: sudo systemctl restart docker
I0923 10:21:12.459347 11849 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0923 10:21:12.469507 11849 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0923 10:21:12.479146 11849 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0923 10:21:12.554718 11849 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0923 10:21:12.626497 11849 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 10:21:12.698106 11849 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0923 10:21:12.709488 11849 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0923 10:21:12.718372 11849 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 10:21:12.795130 11849 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0923 10:21:12.850109 11849 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0923 10:21:12.850200 11849 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0923 10:21:12.853621 11849 start.go:563] Will wait 60s for crictl version
I0923 10:21:12.853670 11849 ssh_runner.go:195] Run: which crictl
I0923 10:21:12.856575 11849 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0923 10:21:12.886787 11849 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.3.0
RuntimeApiVersion: v1
I0923 10:21:12.886846 11849 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0923 10:21:12.908439 11849 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0923 10:21:12.931467 11849 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.3.0 ...
I0923 10:21:12.931544 11849 cli_runner.go:164] Run: docker network inspect addons-071702 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0923 10:21:12.948273 11849 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0923 10:21:12.951389 11849 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0923 10:21:12.960618 11849 kubeadm.go:883] updating cluster {Name:addons-071702 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-071702 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0923 10:21:12.960721 11849 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0923 10:21:12.960757 11849 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0923 10:21:12.977949 11849 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0923 10:21:12.977970 11849 docker.go:615] Images already preloaded, skipping extraction
I0923 10:21:12.978030 11849 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0923 10:21:12.995809 11849 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0923 10:21:12.995833 11849 cache_images.go:84] Images are preloaded, skipping loading
I0923 10:21:12.995843 11849 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0923 10:21:12.995929 11849 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-071702 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-071702 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0923 10:21:12.995978 11849 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0923 10:21:13.037067 11849 cni.go:84] Creating CNI manager for ""
I0923 10:21:13.037100 11849 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0923 10:21:13.037114 11849 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0923 10:21:13.037135 11849 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-071702 NodeName:addons-071702 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0923 10:21:13.037256 11849 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "addons-071702"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0923 10:21:13.037304 11849 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0923 10:21:13.044861 11849 binaries.go:44] Found k8s binaries, skipping transfer
I0923 10:21:13.044930 11849 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0923 10:21:13.052089 11849 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0923 10:21:13.066664 11849 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0923 10:21:13.081453 11849 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0923 10:21:13.096677 11849 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0923 10:21:13.099620 11849 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0923 10:21:13.108726 11849 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 10:21:13.179081 11849 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0923 10:21:13.190587 11849 certs.go:68] Setting up /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702 for IP: 192.168.49.2
I0923 10:21:13.190604 11849 certs.go:194] generating shared ca certs ...
I0923 10:21:13.190621 11849 certs.go:226] acquiring lock for ca certs: {Name:mk6ee9a202179db9ed63e6a3182344c97ea3d5d2 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.190755 11849 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19689-3716/.minikube/ca.key
I0923 10:21:13.281002 11849 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19689-3716/.minikube/ca.crt ...
I0923 10:21:13.281031 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/ca.crt: {Name:mk2fc5f01f85cfa27ad99268606091a3add4e34b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.281185 11849 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19689-3716/.minikube/ca.key ...
I0923 10:21:13.281196 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/ca.key: {Name:mk43bd82b69f9d9d83810a95125b2d656f55fea1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.281267 11849 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.key
I0923 10:21:13.369230 11849 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.crt ...
I0923 10:21:13.369258 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.crt: {Name:mk874ffb9fa0ee62ad3f9f8555749889b96c2bfb Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.369406 11849 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.key ...
I0923 10:21:13.369417 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.key: {Name:mk6e640e370418bc60001436b4c9990d7154f49d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.369485 11849 certs.go:256] generating profile certs ...
I0923 10:21:13.369547 11849 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/client.key
I0923 10:21:13.369558 11849 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/client.crt with IP's: []
I0923 10:21:13.513258 11849 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/client.crt ...
I0923 10:21:13.513286 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/client.crt: {Name:mk4b1aa4ced3db79080162b70201af0fb22562d8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.513440 11849 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/client.key ...
I0923 10:21:13.513450 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/client.key: {Name:mkcb27a071b7719cfb248c04e418edd1d24ef2a7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.513513 11849 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.key.a45bc325
I0923 10:21:13.513529 11849 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.crt.a45bc325 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0923 10:21:13.697552 11849 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.crt.a45bc325 ...
I0923 10:21:13.697579 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.crt.a45bc325: {Name:mkcca7dc77b9cc88c698c1d8e272691703ff1324 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.697717 11849 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.key.a45bc325 ...
I0923 10:21:13.697729 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.key.a45bc325: {Name:mkb8b4732547ee15186be8455d9101c5a1a84134 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.697792 11849 certs.go:381] copying /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.crt.a45bc325 -> /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.crt
I0923 10:21:13.697862 11849 certs.go:385] copying /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.key.a45bc325 -> /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.key
I0923 10:21:13.697905 11849 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.key
I0923 10:21:13.697920 11849 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.crt with IP's: []
I0923 10:21:13.869119 11849 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.crt ...
I0923 10:21:13.869146 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.crt: {Name:mk0a25112ffa3e7adb247c14f82a0442ecae8d86 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.869299 11849 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.key ...
I0923 10:21:13.869313 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.key: {Name:mkd2a6d29bddf8d397e07273635613eea89116a7 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:13.869489 11849 certs.go:484] found cert: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca-key.pem (1675 bytes)
I0923 10:21:13.869520 11849 certs.go:484] found cert: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/ca.pem (1082 bytes)
I0923 10:21:13.869544 11849 certs.go:484] found cert: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/cert.pem (1123 bytes)
I0923 10:21:13.869565 11849 certs.go:484] found cert: /home/jenkins/minikube-integration/19689-3716/.minikube/certs/key.pem (1675 bytes)
I0923 10:21:13.870082 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0923 10:21:13.891405 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0923 10:21:13.910799 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0923 10:21:13.930070 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0923 10:21:13.949590 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0923 10:21:13.969164 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0923 10:21:13.988605 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0923 10:21:14.007985 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/profiles/addons-071702/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0923 10:21:14.027133 11849 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19689-3716/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0923 10:21:14.046546 11849 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0923 10:21:14.060793 11849 ssh_runner.go:195] Run: openssl version
I0923 10:21:14.065362 11849 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0923 10:21:14.072919 11849 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0923 10:21:14.075650 11849 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 23 10:21 /usr/share/ca-certificates/minikubeCA.pem
I0923 10:21:14.075695 11849 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0923 10:21:14.081431 11849 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0923 10:21:14.088966 11849 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0923 10:21:14.091650 11849 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0923 10:21:14.091690 11849 kubeadm.go:392] StartCluster: {Name:addons-071702 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-071702 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0923 10:21:14.091817 11849 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0923 10:21:14.107705 11849 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0923 10:21:14.115156 11849 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0923 10:21:14.122487 11849 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0923 10:21:14.122531 11849 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0923 10:21:14.129530 11849 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0923 10:21:14.129547 11849 kubeadm.go:157] found existing configuration files:
I0923 10:21:14.129583 11849 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0923 10:21:14.136461 11849 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0923 10:21:14.136496 11849 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0923 10:21:14.143180 11849 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0923 10:21:14.149913 11849 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0923 10:21:14.149959 11849 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0923 10:21:14.156579 11849 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0923 10:21:14.163604 11849 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0923 10:21:14.163642 11849 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0923 10:21:14.170388 11849 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0923 10:21:14.177229 11849 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0923 10:21:14.177267 11849 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0923 10:21:14.184108 11849 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0923 10:21:14.214323 11849 kubeadm.go:310] W0923 10:21:14.213757 1926 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0923 10:21:14.214809 11849 kubeadm.go:310] W0923 10:21:14.214356 1926 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0923 10:21:14.232987 11849 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1069-gcp\n", err: exit status 1
I0923 10:21:14.281745 11849 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0923 10:21:24.003974 11849 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0923 10:21:24.004048 11849 kubeadm.go:310] [preflight] Running pre-flight checks
I0923 10:21:24.004151 11849 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0923 10:21:24.004236 11849 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1069-gcp
I0923 10:21:24.004298 11849 kubeadm.go:310] OS: Linux
I0923 10:21:24.004364 11849 kubeadm.go:310] CGROUPS_CPU: enabled
I0923 10:21:24.004434 11849 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0923 10:21:24.004506 11849 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0923 10:21:24.004577 11849 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0923 10:21:24.004628 11849 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0923 10:21:24.004674 11849 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0923 10:21:24.004715 11849 kubeadm.go:310] CGROUPS_PIDS: enabled
I0923 10:21:24.004756 11849 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0923 10:21:24.004795 11849 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0923 10:21:24.004860 11849 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0923 10:21:24.004953 11849 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0923 10:21:24.005049 11849 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0923 10:21:24.005121 11849 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0923 10:21:24.006841 11849 out.go:235] - Generating certificates and keys ...
I0923 10:21:24.006934 11849 kubeadm.go:310] [certs] Using existing ca certificate authority
I0923 10:21:24.007019 11849 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0923 10:21:24.007108 11849 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0923 10:21:24.007195 11849 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0923 10:21:24.007289 11849 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0923 10:21:24.007354 11849 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0923 10:21:24.007434 11849 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0923 10:21:24.007542 11849 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-071702 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0923 10:21:24.007591 11849 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0923 10:21:24.007709 11849 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-071702 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0923 10:21:24.007775 11849 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0923 10:21:24.007832 11849 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0923 10:21:24.007870 11849 kubeadm.go:310] [certs] Generating "sa" key and public key
I0923 10:21:24.007917 11849 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0923 10:21:24.007960 11849 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0923 10:21:24.008019 11849 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0923 10:21:24.008069 11849 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0923 10:21:24.008126 11849 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0923 10:21:24.008172 11849 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0923 10:21:24.008255 11849 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0923 10:21:24.008344 11849 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0923 10:21:24.009630 11849 out.go:235] - Booting up control plane ...
I0923 10:21:24.009707 11849 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0923 10:21:24.009785 11849 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0923 10:21:24.009859 11849 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0923 10:21:24.009957 11849 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0923 10:21:24.010042 11849 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0923 10:21:24.010076 11849 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0923 10:21:24.010254 11849 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0923 10:21:24.010348 11849 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0923 10:21:24.010398 11849 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.000889784s
I0923 10:21:24.010461 11849 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0923 10:21:24.010518 11849 kubeadm.go:310] [api-check] The API server is healthy after 4.501155743s
I0923 10:21:24.010604 11849 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0923 10:21:24.010736 11849 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0923 10:21:24.010804 11849 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0923 10:21:24.011016 11849 kubeadm.go:310] [mark-control-plane] Marking the node addons-071702 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0923 10:21:24.011066 11849 kubeadm.go:310] [bootstrap-token] Using token: q08hz0.03uhaekc526vsuab
I0923 10:21:24.012416 11849 out.go:235] - Configuring RBAC rules ...
I0923 10:21:24.012551 11849 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0923 10:21:24.012680 11849 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0923 10:21:24.012842 11849 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0923 10:21:24.013030 11849 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0923 10:21:24.013197 11849 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0923 10:21:24.013321 11849 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0923 10:21:24.013467 11849 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0923 10:21:24.013527 11849 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0923 10:21:24.013588 11849 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0923 10:21:24.013597 11849 kubeadm.go:310]
I0923 10:21:24.013675 11849 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0923 10:21:24.013685 11849 kubeadm.go:310]
I0923 10:21:24.013754 11849 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0923 10:21:24.013762 11849 kubeadm.go:310]
I0923 10:21:24.013798 11849 kubeadm.go:310] mkdir -p $HOME/.kube
I0923 10:21:24.013893 11849 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0923 10:21:24.013968 11849 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0923 10:21:24.013977 11849 kubeadm.go:310]
I0923 10:21:24.014048 11849 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0923 10:21:24.014063 11849 kubeadm.go:310]
I0923 10:21:24.014146 11849 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0923 10:21:24.014165 11849 kubeadm.go:310]
I0923 10:21:24.014246 11849 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0923 10:21:24.014352 11849 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0923 10:21:24.014450 11849 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0923 10:21:24.014460 11849 kubeadm.go:310]
I0923 10:21:24.014577 11849 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0923 10:21:24.014741 11849 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0923 10:21:24.014753 11849 kubeadm.go:310]
I0923 10:21:24.014870 11849 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token q08hz0.03uhaekc526vsuab \
I0923 10:21:24.015017 11849 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:0d03b657b152f798f7054c01bcc82223b1aa4fcdc63266f7dbd1161e47a64a65 \
I0923 10:21:24.015058 11849 kubeadm.go:310] --control-plane
I0923 10:21:24.015066 11849 kubeadm.go:310]
I0923 10:21:24.015145 11849 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0923 10:21:24.015152 11849 kubeadm.go:310]
I0923 10:21:24.015218 11849 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token q08hz0.03uhaekc526vsuab \
I0923 10:21:24.015324 11849 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:0d03b657b152f798f7054c01bcc82223b1aa4fcdc63266f7dbd1161e47a64a65
I0923 10:21:24.015338 11849 cni.go:84] Creating CNI manager for ""
I0923 10:21:24.015350 11849 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0923 10:21:24.016872 11849 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0923 10:21:24.017957 11849 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0923 10:21:24.025767 11849 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0923 10:21:24.040799 11849 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0923 10:21:24.040863 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:24.040880 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-071702 minikube.k8s.io/updated_at=2024_09_23T10_21_24_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=f69bf2f8ed9442c9c01edbe27466c5398c68b986 minikube.k8s.io/name=addons-071702 minikube.k8s.io/primary=true
I0923 10:21:24.047434 11849 ops.go:34] apiserver oom_adj: -16
I0923 10:21:24.121098 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:24.621360 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:25.121323 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:25.621391 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:26.121269 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:26.621676 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:27.121875 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:27.622100 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:28.121835 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:28.621161 11849 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0923 10:21:28.706836 11849 kubeadm.go:1113] duration metric: took 4.666029899s to wait for elevateKubeSystemPrivileges
I0923 10:21:28.706874 11849 kubeadm.go:394] duration metric: took 14.615187242s to StartCluster
I0923 10:21:28.706917 11849 settings.go:142] acquiring lock: {Name:mka450178266ead0466f3a326c9a6756b4479447 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:28.707014 11849 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19689-3716/kubeconfig
I0923 10:21:28.707330 11849 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19689-3716/kubeconfig: {Name:mk679719faf37a9364b3938ba88d54cbed720fd3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0923 10:21:28.707555 11849 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0923 10:21:28.707665 11849 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0923 10:21:28.707665 11849 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0923 10:21:28.707783 11849 addons.go:69] Setting yakd=true in profile "addons-071702"
I0923 10:21:28.707793 11849 addons.go:69] Setting inspektor-gadget=true in profile "addons-071702"
I0923 10:21:28.707893 11849 addons.go:234] Setting addon inspektor-gadget=true in "addons-071702"
I0923 10:21:28.707892 11849 addons.go:69] Setting storage-provisioner=true in profile "addons-071702"
I0923 10:21:28.707916 11849 addons.go:234] Setting addon storage-provisioner=true in "addons-071702"
I0923 10:21:28.707934 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.707952 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.707805 11849 addons.go:234] Setting addon yakd=true in "addons-071702"
I0923 10:21:28.708022 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.707814 11849 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-071702"
I0923 10:21:28.707822 11849 addons.go:69] Setting volcano=true in profile "addons-071702"
I0923 10:21:28.708101 11849 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-071702"
I0923 10:21:28.708105 11849 addons.go:234] Setting addon volcano=true in "addons-071702"
I0923 10:21:28.707842 11849 addons.go:69] Setting default-storageclass=true in profile "addons-071702"
I0923 10:21:28.708237 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.708254 11849 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-071702"
I0923 10:21:28.708389 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.708519 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.707822 11849 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-071702"
I0923 10:21:28.708523 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.708645 11849 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-071702"
I0923 10:21:28.708673 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.707849 11849 addons.go:69] Setting cloud-spanner=true in profile "addons-071702"
I0923 10:21:28.708889 11849 addons.go:234] Setting addon cloud-spanner=true in "addons-071702"
I0923 10:21:28.708921 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.707857 11849 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-071702"
I0923 10:21:28.708986 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.709020 11849 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-071702"
I0923 10:21:28.709048 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.709363 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.709469 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.707857 11849 addons.go:69] Setting ingress=true in profile "addons-071702"
I0923 10:21:28.709719 11849 addons.go:234] Setting addon ingress=true in "addons-071702"
I0923 10:21:28.709769 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.710176 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.708675 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.707861 11849 addons.go:69] Setting registry=true in profile "addons-071702"
I0923 10:21:28.710508 11849 addons.go:234] Setting addon registry=true in "addons-071702"
I0923 10:21:28.710555 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.710840 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.711098 11849 out.go:177] * Verifying Kubernetes components...
I0923 10:21:28.711361 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.707868 11849 addons.go:69] Setting ingress-dns=true in profile "addons-071702"
I0923 10:21:28.711525 11849 addons.go:234] Setting addon ingress-dns=true in "addons-071702"
I0923 10:21:28.711597 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.707860 11849 addons.go:69] Setting gcp-auth=true in profile "addons-071702"
I0923 10:21:28.707830 11849 addons.go:69] Setting volumesnapshots=true in profile "addons-071702"
I0923 10:21:28.708531 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.707845 11849 config.go:182] Loaded profile config "addons-071702": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0923 10:21:28.707826 11849 addons.go:69] Setting metrics-server=true in profile "addons-071702"
I0923 10:21:28.711830 11849 addons.go:234] Setting addon metrics-server=true in "addons-071702"
I0923 10:21:28.711883 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.712438 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.712595 11849 addons.go:234] Setting addon volumesnapshots=true in "addons-071702"
I0923 10:21:28.712730 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.712981 11849 mustload.go:65] Loading cluster: addons-071702
I0923 10:21:28.714170 11849 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0923 10:21:28.739013 11849 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.24
I0923 10:21:28.740442 11849 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0923 10:21:28.740466 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0923 10:21:28.740520 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.747437 11849 config.go:182] Loaded profile config "addons-071702": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0923 10:21:28.747685 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.747815 11849 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-071702"
I0923 10:21:28.747864 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.748101 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.748286 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.748481 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.757125 11849 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0923 10:21:28.758617 11849 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0923 10:21:28.759906 11849 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0923 10:21:28.762037 11849 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0923 10:21:28.762060 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0923 10:21:28.762115 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.788162 11849 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0923 10:21:28.790219 11849 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0923 10:21:28.790865 11849 addons.go:234] Setting addon default-storageclass=true in "addons-071702"
I0923 10:21:28.790910 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.791381 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:28.793557 11849 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0923 10:21:28.794597 11849 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0923 10:21:28.794714 11849 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0923 10:21:28.795803 11849 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0923 10:21:28.796258 11849 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0923 10:21:28.796271 11849 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0923 10:21:28.796334 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.796606 11849 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0923 10:21:28.796621 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0923 10:21:28.796671 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.797863 11849 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0923 10:21:28.799535 11849 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0923 10:21:28.801362 11849 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0923 10:21:28.802519 11849 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0923 10:21:28.803493 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0923 10:21:28.803513 11849 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0923 10:21:28.803533 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.803571 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.803800 11849 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0923 10:21:28.805725 11849 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0923 10:21:28.805779 11849 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0923 10:21:28.805798 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0923 10:21:28.805847 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.806912 11849 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0923 10:21:28.806935 11849 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0923 10:21:28.806982 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.819858 11849 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0923 10:21:28.819928 11849 out.go:177] - Using image docker.io/registry:2.8.3
I0923 10:21:28.829547 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.832290 11849 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0923 10:21:28.832357 11849 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0923 10:21:28.832368 11849 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0923 10:21:28.832417 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.833348 11849 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0923 10:21:28.834514 11849 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0923 10:21:28.834532 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0923 10:21:28.834578 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.835323 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:28.835687 11849 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0923 10:21:28.836780 11849 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0923 10:21:28.837804 11849 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0923 10:21:28.837819 11849 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0923 10:21:28.837859 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.838301 11849 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0923 10:21:28.839811 11849 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0923 10:21:28.839828 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0923 10:21:28.839867 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.857360 11849 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0923 10:21:28.858915 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.863200 11849 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0923 10:21:28.863218 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0923 10:21:28.863267 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.867023 11849 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0923 10:21:28.869825 11849 out.go:177] - Using image docker.io/busybox:stable
I0923 10:21:28.871280 11849 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0923 10:21:28.871306 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0923 10:21:28.871365 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.876887 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.881504 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.890863 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.893700 11849 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0923 10:21:28.893723 11849 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0923 10:21:28.893773 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:28.895771 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.898847 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.899277 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.901760 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.908691 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.927920 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.931250 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:28.933830 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
W0923 10:21:28.954852 11849 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0923 10:21:28.954885 11849 retry.go:31] will retry after 253.04655ms: ssh: handshake failed: EOF
I0923 10:21:29.077382 11849 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0923 10:21:29.077457 11849 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0923 10:21:29.262392 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0923 10:21:29.262473 11849 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0923 10:21:29.267293 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0923 10:21:29.360252 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0923 10:21:29.360286 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0923 10:21:29.364626 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0923 10:21:29.371732 11849 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0923 10:21:29.371767 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0923 10:21:29.459316 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0923 10:21:29.460430 11849 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0923 10:21:29.460489 11849 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0923 10:21:29.471782 11849 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0923 10:21:29.471860 11849 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0923 10:21:29.473835 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0923 10:21:29.553333 11849 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0923 10:21:29.553420 11849 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0923 10:21:29.560017 11849 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0923 10:21:29.560098 11849 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0923 10:21:29.561111 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0923 10:21:29.575179 11849 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0923 10:21:29.575265 11849 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0923 10:21:29.662775 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0923 10:21:29.662804 11849 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0923 10:21:29.759244 11849 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0923 10:21:29.759328 11849 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0923 10:21:29.873211 11849 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0923 10:21:29.873295 11849 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0923 10:21:29.955601 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0923 10:21:30.054418 11849 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0923 10:21:30.054511 11849 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0923 10:21:30.059457 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0923 10:21:30.059534 11849 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0923 10:21:30.073658 11849 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0923 10:21:30.073735 11849 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0923 10:21:30.155595 11849 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0923 10:21:30.155690 11849 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0923 10:21:30.353336 11849 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0923 10:21:30.353363 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0923 10:21:30.460758 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0923 10:21:30.460792 11849 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0923 10:21:30.554058 11849 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.47652869s)
I0923 10:21:30.554108 11849 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0923 10:21:30.555346 11849 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.477937133s)
I0923 10:21:30.556176 11849 node_ready.go:35] waiting up to 6m0s for node "addons-071702" to be "Ready" ...
I0923 10:21:30.557622 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0923 10:21:30.557684 11849 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0923 10:21:30.558839 11849 node_ready.go:49] node "addons-071702" has status "Ready":"True"
I0923 10:21:30.558883 11849 node_ready.go:38] duration metric: took 2.683674ms for node "addons-071702" to be "Ready" ...
I0923 10:21:30.558903 11849 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0923 10:21:30.567022 11849 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace to be "Ready" ...
I0923 10:21:30.652510 11849 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0923 10:21:30.652541 11849 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0923 10:21:30.678405 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0923 10:21:30.762717 11849 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0923 10:21:30.762803 11849 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0923 10:21:30.955169 11849 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0923 10:21:30.955431 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0923 10:21:30.955406 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0923 10:21:31.063663 11849 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-071702" context rescaled to 1 replicas
I0923 10:21:31.063878 11849 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0923 10:21:31.063917 11849 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0923 10:21:31.360454 11849 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0923 10:21:31.360482 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0923 10:21:31.366157 11849 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0923 10:21:31.366188 11849 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0923 10:21:31.559432 11849 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0923 10:21:31.559462 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0923 10:21:31.755610 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0923 10:21:31.857903 11849 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0923 10:21:31.857935 11849 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0923 10:21:32.052917 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0923 10:21:32.171495 11849 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0923 10:21:32.171527 11849 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0923 10:21:32.353168 11849 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0923 10:21:32.353199 11849 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0923 10:21:32.451741 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (3.184359224s)
I0923 10:21:32.571088 11849 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0923 10:21:32.571117 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0923 10:21:32.652058 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:32.772951 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (3.412598648s)
I0923 10:21:32.854070 11849 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0923 10:21:32.854159 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0923 10:21:32.874402 11849 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0923 10:21:32.874428 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0923 10:21:33.355277 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0923 10:21:33.575246 11849 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0923 10:21:33.575275 11849 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0923 10:21:33.952812 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0923 10:21:35.073258 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:35.856649 11849 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0923 10:21:35.856733 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:35.882615 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:36.673554 11849 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0923 10:21:37.062155 11849 addons.go:234] Setting addon gcp-auth=true in "addons-071702"
I0923 10:21:37.062278 11849 host.go:66] Checking if "addons-071702" exists ...
I0923 10:21:37.063048 11849 cli_runner.go:164] Run: docker container inspect addons-071702 --format={{.State.Status}}
I0923 10:21:37.086184 11849 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0923 10:21:37.086229 11849 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-071702
I0923 10:21:37.101529 11849 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19689-3716/.minikube/machines/addons-071702/id_rsa Username:docker}
I0923 10:21:37.154160 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:39.575994 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:40.468034 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (11.107519833s)
I0923 10:21:40.468312 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (11.008906054s)
I0923 10:21:40.468374 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (10.994483152s)
I0923 10:21:40.468404 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (11.103608991s)
I0923 10:21:40.468442 11849 addons.go:475] Verifying addon ingress=true in "addons-071702"
I0923 10:21:40.468607 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (9.790108408s)
I0923 10:21:40.468641 11849 addons.go:475] Verifying addon registry=true in "addons-071702"
I0923 10:21:40.468815 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (8.713176342s)
I0923 10:21:40.468462 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (10.907306639s)
W0923 10:21:40.468864 11849 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0923 10:21:40.468481 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (10.512796463s)
I0923 10:21:40.468882 11849 retry.go:31] will retry after 144.693801ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0923 10:21:40.468935 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (8.41592831s)
I0923 10:21:40.468731 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.513182182s)
I0923 10:21:40.469036 11849 addons.go:475] Verifying addon metrics-server=true in "addons-071702"
I0923 10:21:40.469131 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (7.113741125s)
I0923 10:21:40.472246 11849 out.go:177] * Verifying ingress addon...
I0923 10:21:40.472368 11849 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-071702 service yakd-dashboard -n yakd-dashboard
I0923 10:21:40.472419 11849 out.go:177] * Verifying registry addon...
I0923 10:21:40.474710 11849 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0923 10:21:40.475852 11849 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
W0923 10:21:40.479129 11849 out.go:270] ! Enabling 'storage-provisioner-rancher' returned an error: running callbacks: [Error making local-path the default storage class: Error while marking storage class local-path as default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
I0923 10:21:40.479618 11849 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0923 10:21:40.479640 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:40.480775 11849 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0923 10:21:40.480799 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:40.614323 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0923 10:21:40.979199 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:40.980590 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:41.479825 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:41.480346 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:41.576244 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:41.681724 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (7.728861681s)
I0923 10:21:41.681763 11849 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-071702"
I0923 10:21:41.681793 11849 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (4.595580743s)
I0923 10:21:41.683960 11849 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0923 10:21:41.683966 11849 out.go:177] * Verifying csi-hostpath-driver addon...
I0923 10:21:41.685228 11849 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0923 10:21:41.685847 11849 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0923 10:21:41.686409 11849 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0923 10:21:41.686422 11849 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0923 10:21:41.755094 11849 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0923 10:21:41.755181 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:41.768662 11849 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0923 10:21:41.768689 11849 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0923 10:21:41.857274 11849 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0923 10:21:41.857304 11849 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0923 10:21:41.879789 11849 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0923 10:21:41.979930 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:41.980257 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:42.256561 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:42.483317 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:42.484096 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:42.756367 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:42.857652 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.243256292s)
I0923 10:21:42.979484 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:42.979672 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:43.175993 11849 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.296149037s)
I0923 10:21:43.177895 11849 addons.go:475] Verifying addon gcp-auth=true in "addons-071702"
I0923 10:21:43.179435 11849 out.go:177] * Verifying gcp-auth addon...
I0923 10:21:43.181813 11849 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0923 10:21:43.183813 11849 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0923 10:21:43.189797 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:43.478656 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:43.478841 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:43.689921 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:43.980375 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:43.980703 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:44.072682 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:44.189845 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:44.479422 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:44.479807 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:44.689357 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:44.979240 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:44.979288 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:45.189640 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:45.479203 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:45.479398 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:45.689707 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:45.978915 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:45.979127 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:46.286773 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:46.479365 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:46.479522 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:46.572560 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:46.689608 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:46.977921 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:46.979288 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:47.189236 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:47.480040 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:47.481721 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:47.690872 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:47.979480 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:47.980276 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:48.190811 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:48.479757 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:48.480856 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:48.573592 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:48.690447 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:48.979328 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:48.979646 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:49.189818 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:49.478231 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:49.478387 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:49.689776 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:49.978946 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:49.978984 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:50.190716 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:50.481026 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:50.481311 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:50.689651 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:50.978733 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:50.978977 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:51.072423 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:51.189688 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:51.478558 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:51.478607 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:51.690435 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:51.978804 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:51.978851 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:52.219448 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:52.479056 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:52.479163 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:52.689688 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:52.978822 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:52.978949 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:53.190347 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:53.479147 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:53.479401 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:53.572934 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:53.689055 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:53.979477 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:53.979869 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:54.189674 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:54.478786 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:54.479114 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:54.691050 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:54.978825 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:54.978907 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:55.188938 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:55.478883 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:55.479379 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:55.690342 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:55.979126 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:55.979213 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:56.072783 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:56.189946 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:56.478439 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:56.478480 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:56.718953 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:56.978832 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:56.978953 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:57.286662 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:57.478264 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:57.478887 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:57.689517 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:57.978984 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:57.979294 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:58.073187 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:21:58.190012 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:58.478939 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:58.479110 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:58.689803 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:58.978522 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:58.979007 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:59.189318 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:59.479083 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:59.479341 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:21:59.689902 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:21:59.979053 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:21:59.979267 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:00.189654 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:00.478476 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:00.478721 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:00.572699 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:22:00.689039 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:00.978382 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:00.978919 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:01.189197 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:01.478856 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:01.479227 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:01.690087 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:01.979626 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:01.979834 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:02.190387 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:02.479073 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:02.479141 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:02.573231 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:22:02.690033 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:02.979020 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:02.979798 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:03.190117 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:03.478848 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:03.479507 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:03.690407 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:03.978722 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:03.978855 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:04.189823 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:04.478626 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:04.478873 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:04.691531 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:04.979126 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0923 10:22:04.979356 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:05.073187 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:22:05.189922 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:05.479139 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:05.479365 11849 kapi.go:107] duration metric: took 25.003510418s to wait for kubernetes.io/minikube-addons=registry ...
I0923 10:22:05.689255 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:05.978832 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:06.189713 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:06.478712 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:06.689519 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:06.979031 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:07.073645 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:22:07.190112 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:07.487809 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:07.690011 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:07.979004 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:08.190319 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:08.478859 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:08.689463 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:08.979018 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:09.189280 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:09.479040 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:09.572441 11849 pod_ready.go:103] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"False"
I0923 10:22:09.689606 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:09.979374 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:10.189309 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:10.479017 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:10.690132 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:10.979292 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:11.073349 11849 pod_ready.go:93] pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace has status "Ready":"True"
I0923 10:22:11.073374 11849 pod_ready.go:82] duration metric: took 40.506164292s for pod "coredns-7c65d6cfc9-hd4pw" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.073386 11849 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-szwtj" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.076765 11849 pod_ready.go:98] error getting pod "coredns-7c65d6cfc9-szwtj" in "kube-system" namespace (skipping!): pods "coredns-7c65d6cfc9-szwtj" not found
I0923 10:22:11.076804 11849 pod_ready.go:82] duration metric: took 3.408582ms for pod "coredns-7c65d6cfc9-szwtj" in "kube-system" namespace to be "Ready" ...
E0923 10:22:11.076817 11849 pod_ready.go:67] WaitExtra: waitPodCondition: error getting pod "coredns-7c65d6cfc9-szwtj" in "kube-system" namespace (skipping!): pods "coredns-7c65d6cfc9-szwtj" not found
I0923 10:22:11.076827 11849 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.080923 11849 pod_ready.go:93] pod "etcd-addons-071702" in "kube-system" namespace has status "Ready":"True"
I0923 10:22:11.080945 11849 pod_ready.go:82] duration metric: took 4.109957ms for pod "etcd-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.080956 11849 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.085017 11849 pod_ready.go:93] pod "kube-apiserver-addons-071702" in "kube-system" namespace has status "Ready":"True"
I0923 10:22:11.085039 11849 pod_ready.go:82] duration metric: took 4.074001ms for pod "kube-apiserver-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.085051 11849 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.088995 11849 pod_ready.go:93] pod "kube-controller-manager-addons-071702" in "kube-system" namespace has status "Ready":"True"
I0923 10:22:11.089016 11849 pod_ready.go:82] duration metric: took 3.956391ms for pod "kube-controller-manager-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.089026 11849 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-gsgwd" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.189665 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:11.270601 11849 pod_ready.go:93] pod "kube-proxy-gsgwd" in "kube-system" namespace has status "Ready":"True"
I0923 10:22:11.270626 11849 pod_ready.go:82] duration metric: took 181.591464ms for pod "kube-proxy-gsgwd" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.270639 11849 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.478791 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:11.670599 11849 pod_ready.go:93] pod "kube-scheduler-addons-071702" in "kube-system" namespace has status "Ready":"True"
I0923 10:22:11.670623 11849 pod_ready.go:82] duration metric: took 399.975445ms for pod "kube-scheduler-addons-071702" in "kube-system" namespace to be "Ready" ...
I0923 10:22:11.670633 11849 pod_ready.go:39] duration metric: took 41.11170856s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0923 10:22:11.670656 11849 api_server.go:52] waiting for apiserver process to appear ...
I0923 10:22:11.670731 11849 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0923 10:22:11.684118 11849 api_server.go:72] duration metric: took 42.976532631s to wait for apiserver process to appear ...
I0923 10:22:11.684145 11849 api_server.go:88] waiting for apiserver healthz status ...
I0923 10:22:11.684168 11849 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0923 10:22:11.687834 11849 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0923 10:22:11.688628 11849 api_server.go:141] control plane version: v1.31.1
I0923 10:22:11.688653 11849 api_server.go:131] duration metric: took 4.499932ms to wait for apiserver health ...
I0923 10:22:11.688661 11849 system_pods.go:43] waiting for kube-system pods to appear ...
I0923 10:22:11.689659 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:11.874566 11849 system_pods.go:59] 17 kube-system pods found
I0923 10:22:11.874593 11849 system_pods.go:61] "coredns-7c65d6cfc9-hd4pw" [53fdec36-508d-40c2-9b22-80f6afc1976b] Running
I0923 10:22:11.874602 11849 system_pods.go:61] "csi-hostpath-attacher-0" [da0cebec-a9ae-4226-aa37-c12c18ed4683] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0923 10:22:11.874609 11849 system_pods.go:61] "csi-hostpath-resizer-0" [58d839a9-8c23-473d-bcea-6bb514783b23] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0923 10:22:11.874616 11849 system_pods.go:61] "csi-hostpathplugin-nm6zh" [2897db1d-3abb-4194-a021-6febc9c88430] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0923 10:22:11.874626 11849 system_pods.go:61] "etcd-addons-071702" [e3b672b2-f950-4103-bb7b-5852cbd23171] Running
I0923 10:22:11.874630 11849 system_pods.go:61] "kube-apiserver-addons-071702" [edae2c11-e976-40e5-995a-349287adf5af] Running
I0923 10:22:11.874637 11849 system_pods.go:61] "kube-controller-manager-addons-071702" [74585c60-486f-49ca-b8d1-3d88fafcfd85] Running
I0923 10:22:11.874642 11849 system_pods.go:61] "kube-ingress-dns-minikube" [7a06c605-470c-4010-9782-8ddb472a082d] Running
I0923 10:22:11.874645 11849 system_pods.go:61] "kube-proxy-gsgwd" [da9f33ce-241c-4457-87e2-b90aaf06b0ce] Running
I0923 10:22:11.874649 11849 system_pods.go:61] "kube-scheduler-addons-071702" [d7dfada2-ae46-4f5f-bd80-4bc44bf4aa9c] Running
I0923 10:22:11.874654 11849 system_pods.go:61] "metrics-server-84c5f94fbc-4l9jp" [04190cb8-ca8f-459e-bb88-24aa8a774d1d] Running
I0923 10:22:11.874657 11849 system_pods.go:61] "nvidia-device-plugin-daemonset-kxghd" [959908a1-ff48-4627-ad29-d0d6134865d7] Running
I0923 10:22:11.874662 11849 system_pods.go:61] "registry-66c9cd494c-sswjh" [42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe] Running
I0923 10:22:11.874665 11849 system_pods.go:61] "registry-proxy-w6x4v" [8964449b-425a-4614-aa7b-d6cc98a185c7] Running
I0923 10:22:11.874696 11849 system_pods.go:61] "snapshot-controller-56fcc65765-cvdcs" [306aebc0-cf33-45f7-8669-d354b0ae713c] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 10:22:11.874708 11849 system_pods.go:61] "snapshot-controller-56fcc65765-zfsbk" [6f50a1f3-ec1f-4d33-8fa9-9379bfc44a79] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 10:22:11.874718 11849 system_pods.go:61] "storage-provisioner" [b9297771-204e-479f-9aa4-da05fb25f230] Running
I0923 10:22:11.874726 11849 system_pods.go:74] duration metric: took 186.058115ms to wait for pod list to return data ...
I0923 10:22:11.874737 11849 default_sa.go:34] waiting for default service account to be created ...
I0923 10:22:11.979093 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:12.069751 11849 default_sa.go:45] found service account: "default"
I0923 10:22:12.069776 11849 default_sa.go:55] duration metric: took 195.032177ms for default service account to be created ...
I0923 10:22:12.069785 11849 system_pods.go:116] waiting for k8s-apps to be running ...
I0923 10:22:12.189931 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:12.277344 11849 system_pods.go:86] 17 kube-system pods found
I0923 10:22:12.277379 11849 system_pods.go:89] "coredns-7c65d6cfc9-hd4pw" [53fdec36-508d-40c2-9b22-80f6afc1976b] Running
I0923 10:22:12.277391 11849 system_pods.go:89] "csi-hostpath-attacher-0" [da0cebec-a9ae-4226-aa37-c12c18ed4683] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0923 10:22:12.277401 11849 system_pods.go:89] "csi-hostpath-resizer-0" [58d839a9-8c23-473d-bcea-6bb514783b23] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0923 10:22:12.277412 11849 system_pods.go:89] "csi-hostpathplugin-nm6zh" [2897db1d-3abb-4194-a021-6febc9c88430] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0923 10:22:12.277420 11849 system_pods.go:89] "etcd-addons-071702" [e3b672b2-f950-4103-bb7b-5852cbd23171] Running
I0923 10:22:12.277426 11849 system_pods.go:89] "kube-apiserver-addons-071702" [edae2c11-e976-40e5-995a-349287adf5af] Running
I0923 10:22:12.277432 11849 system_pods.go:89] "kube-controller-manager-addons-071702" [74585c60-486f-49ca-b8d1-3d88fafcfd85] Running
I0923 10:22:12.277437 11849 system_pods.go:89] "kube-ingress-dns-minikube" [7a06c605-470c-4010-9782-8ddb472a082d] Running
I0923 10:22:12.277444 11849 system_pods.go:89] "kube-proxy-gsgwd" [da9f33ce-241c-4457-87e2-b90aaf06b0ce] Running
I0923 10:22:12.277450 11849 system_pods.go:89] "kube-scheduler-addons-071702" [d7dfada2-ae46-4f5f-bd80-4bc44bf4aa9c] Running
I0923 10:22:12.277459 11849 system_pods.go:89] "metrics-server-84c5f94fbc-4l9jp" [04190cb8-ca8f-459e-bb88-24aa8a774d1d] Running
I0923 10:22:12.277465 11849 system_pods.go:89] "nvidia-device-plugin-daemonset-kxghd" [959908a1-ff48-4627-ad29-d0d6134865d7] Running
I0923 10:22:12.277473 11849 system_pods.go:89] "registry-66c9cd494c-sswjh" [42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe] Running
I0923 10:22:12.277479 11849 system_pods.go:89] "registry-proxy-w6x4v" [8964449b-425a-4614-aa7b-d6cc98a185c7] Running
I0923 10:22:12.277489 11849 system_pods.go:89] "snapshot-controller-56fcc65765-cvdcs" [306aebc0-cf33-45f7-8669-d354b0ae713c] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 10:22:12.277501 11849 system_pods.go:89] "snapshot-controller-56fcc65765-zfsbk" [6f50a1f3-ec1f-4d33-8fa9-9379bfc44a79] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0923 10:22:12.277507 11849 system_pods.go:89] "storage-provisioner" [b9297771-204e-479f-9aa4-da05fb25f230] Running
I0923 10:22:12.277519 11849 system_pods.go:126] duration metric: took 207.727592ms to wait for k8s-apps to be running ...
I0923 10:22:12.277531 11849 system_svc.go:44] waiting for kubelet service to be running ....
I0923 10:22:12.277583 11849 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0923 10:22:12.288993 11849 system_svc.go:56] duration metric: took 11.45412ms WaitForService to wait for kubelet
I0923 10:22:12.289022 11849 kubeadm.go:582] duration metric: took 43.581437962s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0923 10:22:12.289044 11849 node_conditions.go:102] verifying NodePressure condition ...
I0923 10:22:12.471580 11849 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0923 10:22:12.471607 11849 node_conditions.go:123] node cpu capacity is 8
I0923 10:22:12.471619 11849 node_conditions.go:105] duration metric: took 182.569018ms to run NodePressure ...
I0923 10:22:12.471638 11849 start.go:241] waiting for startup goroutines ...
I0923 10:22:12.478875 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:12.690306 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:12.978802 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:13.289010 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:13.479885 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:13.787874 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:13.979912 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:14.191554 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:14.480386 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:14.689580 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:14.978898 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:15.189383 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:15.478434 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:15.689383 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:15.980056 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:16.190665 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:16.479545 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:16.689763 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:16.979407 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:17.189639 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:17.479161 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:17.689741 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:17.980050 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:18.189269 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:18.478956 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:18.690933 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:18.979478 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:19.189524 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:19.479516 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:19.689484 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:19.978077 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:20.189681 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:20.479016 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:20.690194 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:20.979388 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:21.189868 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:21.478931 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:21.689122 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:21.978622 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:22.190551 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:22.514292 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:22.688601 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:22.978441 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:23.189955 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:23.478782 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:23.689997 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:23.979239 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:24.190359 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:24.479756 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:24.689822 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:24.979679 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:25.190281 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:25.479518 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:25.689834 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:25.979885 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:26.255176 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:26.478905 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:26.690455 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:26.978047 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:27.190064 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:27.479332 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:27.689909 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:27.979586 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:28.189501 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:28.479441 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:28.688935 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:28.978858 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:29.189917 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:29.479417 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:29.689501 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0923 10:22:29.978967 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:30.189251 11849 kapi.go:107] duration metric: took 48.50339918s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0923 10:22:30.478541 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:30.979203 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:31.478960 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:31.977793 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:32.479138 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:32.978318 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:33.478355 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:33.978184 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:34.477850 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:34.979371 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:35.479043 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:35.979198 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:36.478911 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:36.978975 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:37.478550 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:37.978825 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:38.478222 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:38.978445 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:39.478449 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:39.978605 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:40.478382 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:40.978473 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:41.478525 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:41.978946 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:42.480557 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:42.978997 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:43.479821 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:43.979289 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:44.479331 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:44.979919 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:45.478836 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:45.979489 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:46.478958 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:46.978470 11849 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0923 10:22:47.481100 11849 kapi.go:107] duration metric: took 1m7.006388582s to wait for app.kubernetes.io/name=ingress-nginx ...
I0923 10:23:05.685518 11849 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0923 10:23:05.685538 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:06.185070 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:06.685190 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:07.185166 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:07.685462 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:08.185220 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:08.684831 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:09.185180 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:09.684762 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:10.185712 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:10.684770 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:11.184403 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:11.684934 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:12.184556 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:12.685720 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:13.185154 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:13.685207 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:14.184764 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:14.684381 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:15.185633 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:15.685035 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:16.184662 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:16.685383 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:17.185723 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:17.685623 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:18.185780 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:18.684775 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:19.185128 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:19.685527 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:20.185690 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:20.685196 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:21.185317 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:21.685275 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:22.185260 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:22.684957 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:23.184699 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:23.684507 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:24.185638 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:24.684826 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:25.184811 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:25.685388 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:26.185266 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:26.685105 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:27.185160 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:27.685254 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:28.185311 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:28.685503 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:29.184877 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:29.685230 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:30.184854 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:30.684629 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:31.186008 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:31.684858 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:32.184525 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:32.685611 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:33.185718 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:33.684680 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:34.186050 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:34.684712 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:35.184958 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:35.685491 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:36.185265 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:36.685109 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:37.184872 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:37.684677 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:38.184727 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:38.684493 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:39.184822 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:39.684673 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:40.185787 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:40.684885 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:41.185102 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:41.684762 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:42.185420 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:42.685556 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:43.185884 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:43.685465 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:44.185126 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:44.685090 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:45.185203 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:45.685335 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:46.185384 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:46.685826 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:47.184819 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:47.684856 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:48.185425 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:48.685456 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:49.185696 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:49.685777 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:50.184922 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:50.684928 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:51.185002 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:51.685003 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:52.184980 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:52.685170 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:53.185279 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:53.685222 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:54.185249 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:54.685445 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:55.185974 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:55.685642 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:56.185021 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:56.684588 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:57.185662 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:57.684982 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:58.185082 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:58.684773 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:59.185178 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:23:59.684899 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:00.185005 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:00.684855 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:01.184953 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:01.684718 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:02.184413 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:02.685397 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:03.185502 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:03.685386 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:04.185084 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:04.684951 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:05.185115 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:05.685337 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:06.185583 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:06.685564 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:07.185713 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:07.684775 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:08.184788 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:08.685473 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:09.185387 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:09.685720 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:10.185083 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:10.685746 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:11.185513 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:11.686018 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:12.185422 11849 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0923 10:24:12.698458 11849 kapi.go:107] duration metric: took 2m29.516642295s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0923 10:24:12.714404 11849 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-071702 cluster.
I0923 10:24:12.716422 11849 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0923 10:24:12.718018 11849 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0923 10:24:12.719199 11849 out.go:177] * Enabled addons: ingress-dns, storage-provisioner, volcano, nvidia-device-plugin, cloud-spanner, metrics-server, inspektor-gadget, yakd, default-storageclass, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0923 10:24:12.720228 11849 addons.go:510] duration metric: took 2m44.012571718s for enable addons: enabled=[ingress-dns storage-provisioner volcano nvidia-device-plugin cloud-spanner metrics-server inspektor-gadget yakd default-storageclass volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0923 10:24:12.720264 11849 start.go:246] waiting for cluster config update ...
I0923 10:24:12.720290 11849 start.go:255] writing updated cluster config ...
I0923 10:24:12.720553 11849 ssh_runner.go:195] Run: rm -f paused
I0923 10:24:12.800417 11849 start.go:600] kubectl: 1.31.1, cluster: 1.31.1 (minor skew: 0)
I0923 10:24:12.802276 11849 out.go:177] * Done! kubectl is now configured to use "addons-071702" cluster and "default" namespace by default
==> Docker <==
Sep 23 10:33:45 addons-071702 cri-dockerd[1611]: time="2024-09-23T10:33:45Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"task-pv-pod-restore_default\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 23 10:33:45 addons-071702 dockerd[1345]: time="2024-09-23T10:33:45.333522605Z" level=info msg="ignoring event" container=df8c881ec8ffc48561b54d365867856f8920bb054556b2db735213e6e333a0d6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.373184251Z" level=info msg="ignoring event" container=55fce2e386d5e14f3235735c779acf3136bb7b7dddcfe4ea2222960012ad2aa3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.464021709Z" level=info msg="ignoring event" container=98ad8767bad5ee2a83b6c6ff9db0f79770b716871efdee395a289b952715c583 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.467896699Z" level=info msg="ignoring event" container=49b0944ed0d4770ecfd1cc69a7283c91ef681cbca216ddbea6c3f33a32315c53 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.468681386Z" level=info msg="ignoring event" container=143912aab3482e05f9b13b298dd2cfb4692ebc9c3bcba95420d0afff9445122c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.469998250Z" level=info msg="ignoring event" container=8acb1a3afff633c89788aa338731a3a82ae296593e5d006192f1d88b4aaebf42 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.470960528Z" level=info msg="ignoring event" container=832494167f1e94451657b1cbcdc6adbb063e7462356ec4ecb93a56e618a8e814 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.557232664Z" level=info msg="ignoring event" container=0861e1919fc9d7a4ba964afe657741b968c4de1f35a7ae4ba7d29bee56bd978e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.558993047Z" level=info msg="ignoring event" container=e8ea65fe812686e5c472b5b301de88e49cc936d39de6d57f4ce81c98d0db5aa6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.757911401Z" level=info msg="ignoring event" container=4ce9528e1ba0a4f7141362f866bf96dae2e18793585b13596d9f0235c146cf5f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.783743907Z" level=info msg="ignoring event" container=1d61a217146dd96ce59b1f0b283c78dbf35774a303fe3e1b52e2c584f98f5342 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:47 addons-071702 dockerd[1345]: time="2024-09-23T10:33:47.804218123Z" level=info msg="ignoring event" container=77c68e23f2bc415e30fe4a4495130df868caffb1afa2c2b1d03e412dbd1bb3aa module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:49 addons-071702 dockerd[1345]: time="2024-09-23T10:33:49.407282934Z" level=info msg="Attempting next endpoint for pull after error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" spanID=2a0776a9f89a6d1b traceID=c578c5a26e36cbdf336e6ad3df3852b7
Sep 23 10:33:49 addons-071702 dockerd[1345]: time="2024-09-23T10:33:49.409495161Z" level=error msg="Handler for POST /v1.43/images/create returned error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" spanID=2a0776a9f89a6d1b traceID=c578c5a26e36cbdf336e6ad3df3852b7
Sep 23 10:33:53 addons-071702 dockerd[1345]: time="2024-09-23T10:33:53.665531614Z" level=info msg="ignoring event" container=421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:53 addons-071702 dockerd[1345]: time="2024-09-23T10:33:53.673177747Z" level=info msg="ignoring event" container=cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:53 addons-071702 dockerd[1345]: time="2024-09-23T10:33:53.812162149Z" level=info msg="ignoring event" container=0623cb23025a8274a7d1fac5b18807d6244bddf0a6325c6324e3be9709571196 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:33:53 addons-071702 dockerd[1345]: time="2024-09-23T10:33:53.841744605Z" level=info msg="ignoring event" container=5d092d0bc4cd5db062e290b3793dfe813e3a8d6355a3f0b31e53020c267696a4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:34:05 addons-071702 dockerd[1345]: time="2024-09-23T10:34:05.130509604Z" level=info msg="ignoring event" container=535067b4bd37dd82921e916b23a53df83ac3dc734ad2b703cc20af8febe0dea2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:34:05 addons-071702 dockerd[1345]: time="2024-09-23T10:34:05.655129553Z" level=info msg="ignoring event" container=5d114050ac1c887356b180e39c42b27a6b7eec948223cab236e11378c8ec2b63 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:34:05 addons-071702 dockerd[1345]: time="2024-09-23T10:34:05.663697802Z" level=info msg="ignoring event" container=17261e380369bd5ff191af49686648f0b3c6f981535d76cd2f9f0d864285cb08 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:34:05 addons-071702 dockerd[1345]: time="2024-09-23T10:34:05.795022012Z" level=info msg="ignoring event" container=496c7844e167ff0afc182e22f66317342db55e1269ec5830f86cdaf4fca57834 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 23 10:34:05 addons-071702 cri-dockerd[1611]: time="2024-09-23T10:34:05Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"registry-proxy-w6x4v_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 23 10:34:05 addons-071702 dockerd[1345]: time="2024-09-23T10:34:05.872207774Z" level=info msg="ignoring event" container=9dc3f70eb563e7df2aa48777163281b82fa975245e704a41063c3163bf31c7be module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
2d033fc2cf3c6 kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 27 seconds ago Running hello-world-app 0 f4166ecdf6a4f hello-world-app-55bf9c44b4-87prz
a4dec6063579d nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 38 seconds ago Running nginx 0 62b6d1fb7e0b4 nginx
2186582a836b5 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 7572b9e72f093 gcp-auth-89d5ffd79-5njc2
7f21c32517212 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited patch 0 9590f220fe1ee ingress-nginx-admission-patch-5fvqk
6aa8dc6312669 registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 4b8559e455e03 ingress-nginx-admission-create-5xvgp
17261e380369b gcr.io/k8s-minikube/kube-registry-proxy@sha256:b3fa0b2df8737fdb85ad5918a7e2652527463e357afff83a5e5bb966bcedc367 12 minutes ago Exited registry-proxy 0 9dc3f70eb563e registry-proxy-w6x4v
ca288cd1b4a28 rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246 12 minutes ago Running local-path-provisioner 0 834c04c24ac98 local-path-provisioner-86d989889c-5q6hg
283a48bd8ff1e 6e38f40d628db 12 minutes ago Running storage-provisioner 0 e93d1d20f2606 storage-provisioner
20ed0161c4e82 c69fa2e9cbf5f 12 minutes ago Running coredns 0 ead993cb3a469 coredns-7c65d6cfc9-hd4pw
20bcbfe91581d 60c005f310ff3 12 minutes ago Running kube-proxy 0 3394ea776aa5b kube-proxy-gsgwd
9e05497c00f23 6bab7719df100 12 minutes ago Running kube-apiserver 0 238abf4e06771 kube-apiserver-addons-071702
cb22d2aad924c 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 1a00876a961c6 kube-controller-manager-addons-071702
9305922fd2cd3 9aa1fad941575 12 minutes ago Running kube-scheduler 0 7a61731b82779 kube-scheduler-addons-071702
dd7e3397fd9bd 2e96e5913fc06 12 minutes ago Running etcd 0 2cdd1c6d4772a etcd-addons-071702
==> coredns [20ed0161c4e8] <==
[INFO] 10.244.0.21:37527 - 27265 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004966417s
[INFO] 10.244.0.21:34780 - 30491 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005072356s
[INFO] 10.244.0.21:51605 - 55134 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005505481s
[INFO] 10.244.0.21:59388 - 5211 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005587385s
[INFO] 10.244.0.21:59720 - 43205 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004968399s
[INFO] 10.244.0.21:51808 - 49668 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005323956s
[INFO] 10.244.0.21:54372 - 11455 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005069541s
[INFO] 10.244.0.21:34780 - 6185 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004714439s
[INFO] 10.244.0.21:51605 - 26189 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005350006s
[INFO] 10.244.0.21:37527 - 31143 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005423282s
[INFO] 10.244.0.21:34780 - 56585 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000085415s
[INFO] 10.244.0.21:59720 - 31265 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005331724s
[INFO] 10.244.0.21:51978 - 48738 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005650998s
[INFO] 10.244.0.21:54372 - 35793 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005350989s
[INFO] 10.244.0.21:51605 - 22590 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000080852s
[INFO] 10.244.0.21:59388 - 32157 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004988657s
[INFO] 10.244.0.21:43073 - 39520 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005812606s
[INFO] 10.244.0.21:51808 - 45954 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005410385s
[INFO] 10.244.0.21:51978 - 52911 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00007709s
[INFO] 10.244.0.21:59388 - 44770 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.0000632s
[INFO] 10.244.0.21:43073 - 28370 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000074533s
[INFO] 10.244.0.21:37527 - 50849 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000100353s
[INFO] 10.244.0.21:54372 - 50164 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000077962s
[INFO] 10.244.0.21:51808 - 56123 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000106503s
[INFO] 10.244.0.21:59720 - 8447 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00006348s
==> describe nodes <==
Name: addons-071702
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-071702
kubernetes.io/os=linux
minikube.k8s.io/commit=f69bf2f8ed9442c9c01edbe27466c5398c68b986
minikube.k8s.io/name=addons-071702
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_23T10_21_24_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-071702
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Mon, 23 Sep 2024 10:21:20 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-071702
AcquireTime: <unset>
RenewTime: Mon, 23 Sep 2024 10:33:57 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Mon, 23 Sep 2024 10:33:57 +0000 Mon, 23 Sep 2024 10:21:19 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Mon, 23 Sep 2024 10:33:57 +0000 Mon, 23 Sep 2024 10:21:19 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Mon, 23 Sep 2024 10:33:57 +0000 Mon, 23 Sep 2024 10:21:19 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Mon, 23 Sep 2024 10:33:57 +0000 Mon, 23 Sep 2024 10:21:20 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-071702
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859312Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859312Ki
pods: 110
System Info:
Machine ID: 10b1783ce4ab45188955f39aa1dcf347
System UUID: 0b8cf6c3-b2c2-417d-a323-acbca8a7fc1c
Boot ID: cfa98cdb-4c43-498d-8dd6-a23453a788b2
Kernel Version: 5.15.0-1069-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.3.0
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (12 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9m15s
default hello-world-app-55bf9c44b4-87prz 0 (0%) 0 (0%) 0 (0%) 0 (0%) 29s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 41s
gcp-auth gcp-auth-89d5ffd79-5njc2 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11m
kube-system coredns-7c65d6cfc9-hd4pw 100m (1%) 0 (0%) 70Mi (0%) 170Mi (0%) 12m
kube-system etcd-addons-071702 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 12m
kube-system kube-apiserver-addons-071702 250m (3%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-controller-manager-addons-071702 200m (2%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-proxy-gsgwd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-scheduler-addons-071702 100m (1%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
local-path-storage local-path-provisioner-86d989889c-5q6hg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 750m (9%) 0 (0%)
memory 170Mi (0%) 170Mi (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 12m kube-proxy
Normal Starting 12m kubelet Starting kubelet.
Warning CgroupV1 12m kubelet Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal NodeAllocatableEnforced 12m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 12m kubelet Node addons-071702 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 12m kubelet Node addons-071702 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 12m kubelet Node addons-071702 status is now: NodeHasSufficientPID
Normal RegisteredNode 12m node-controller Node addons-071702 event: Registered Node addons-071702 in Controller
==> dmesg <==
[ +0.000009] ll header: 00000000: ff ff ff ff ff ff 96 7d 3f 8f d5 f2 08 06
[ +2.025426] IPv4: martian source 10.244.0.1 from 10.244.0.17, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 3a da b7 cd aa 75 08 06
[ +5.411075] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 42 c7 6b a5 4c 94 08 06
[ +0.286718] IPv4: martian source 10.244.0.1 from 10.244.0.18, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 62 13 1b ac 27 38 08 06
[ +0.541061] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 5a 10 20 02 d8 8f 08 06
[ +18.624270] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 52 d7 61 9d 2d 0a 08 06
[ +1.076226] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff ce 46 0b eb 24 09 08 06
[Sep23 10:23] IPv4: martian source 10.244.0.1 from 10.244.0.23, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff fa b1 04 43 d3 94 08 06
[ +0.038260] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 42 ba 0a 0f 6f 42 08 06
[Sep23 10:24] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff de 42 63 c4 f5 bd 08 06
[ +0.000420] IPv4: martian source 10.244.0.25 from 10.244.0.3, on dev eth0
[ +0.000004] ll header: 00000000: ff ff ff ff ff ff 3e f6 8a 3b 20 b6 08 06
[Sep23 10:33] IPv4: martian source 10.244.0.34 from 10.244.0.21, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 52 d7 61 9d 2d 0a 08 06
[ +0.407389] IPv4: martian source 10.244.0.21 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 3e f6 8a 3b 20 b6 08 06
==> etcd [dd7e3397fd9b] <==
{"level":"info","ts":"2024-09-23T10:21:19.470437Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-09-23T10:21:19.470461Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-09-23T10:21:19.470476Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-23T10:21:19.470496Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-23T10:21:19.470509Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-23T10:21:19.471418Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-071702 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-23T10:21:19.471448Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-23T10:21:19.471477Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-23T10:21:19.471653Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T10:21:19.471802Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-23T10:21:19.471883Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-23T10:21:19.472321Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T10:21:19.472404Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T10:21:19.472426Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-23T10:21:19.472654Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-23T10:21:19.472663Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-23T10:21:19.473806Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-09-23T10:21:19.473806Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"warn","ts":"2024-09-23T10:21:31.052775Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"201.399399ms","expected-duration":"100ms","prefix":"","request":"header:<ID:8128032086761132362 username:\"kube-apiserver-etcd-client\" auth_revision:1 > txn:<compare:<target:MOD key:\"/registry/replicasets/kube-system/coredns-7c65d6cfc9\" mod_revision:364 > success:<request_put:<key:\"/registry/replicasets/kube-system/coredns-7c65d6cfc9\" value_size:3722 >> failure:<request_range:<key:\"/registry/replicasets/kube-system/coredns-7c65d6cfc9\" > >>","response":"size:16"}
{"level":"info","ts":"2024-09-23T10:21:31.052948Z","caller":"traceutil/trace.go:171","msg":"trace[2009601065] transaction","detail":"{read_only:false; response_revision:366; number_of_response:1; }","duration":"199.394436ms","start":"2024-09-23T10:21:30.853536Z","end":"2024-09-23T10:21:31.052930Z","steps":["trace[2009601065] 'process raft request' (duration: 199.329553ms)"],"step_count":1}
{"level":"info","ts":"2024-09-23T10:21:31.053009Z","caller":"traceutil/trace.go:171","msg":"trace[1502259280] transaction","detail":"{read_only:false; response_revision:365; number_of_response:1; }","duration":"301.743716ms","start":"2024-09-23T10:21:30.751239Z","end":"2024-09-23T10:21:31.052982Z","steps":["trace[1502259280] 'process raft request' (duration: 99.62689ms)","trace[1502259280] 'compare' (duration: 201.084055ms)"],"step_count":2}
{"level":"warn","ts":"2024-09-23T10:21:31.053113Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2024-09-23T10:21:30.751220Z","time spent":"301.835015ms","remote":"127.0.0.1:36346","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":3782,"response count":0,"response size":40,"request content":"compare:<target:MOD key:\"/registry/replicasets/kube-system/coredns-7c65d6cfc9\" mod_revision:364 > success:<request_put:<key:\"/registry/replicasets/kube-system/coredns-7c65d6cfc9\" value_size:3722 >> failure:<request_range:<key:\"/registry/replicasets/kube-system/coredns-7c65d6cfc9\" > >"}
{"level":"info","ts":"2024-09-23T10:31:19.674928Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1846}
{"level":"info","ts":"2024-09-23T10:31:19.698713Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1846,"took":"23.227145ms","hash":1388776187,"current-db-size-bytes":9011200,"current-db-size":"9.0 MB","current-db-size-in-use-bytes":4907008,"current-db-size-in-use":"4.9 MB"}
{"level":"info","ts":"2024-09-23T10:31:19.698761Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":1388776187,"revision":1846,"compact-revision":-1}
==> gcp-auth [2186582a836b] <==
2024/09/23 10:24:51 Ready to write response ...
2024/09/23 10:24:51 Ready to marshal response ...
2024/09/23 10:24:51 Ready to write response ...
2024/09/23 10:32:54 Ready to marshal response ...
2024/09/23 10:32:54 Ready to write response ...
2024/09/23 10:32:54 Ready to marshal response ...
2024/09/23 10:32:54 Ready to write response ...
2024/09/23 10:32:55 Ready to marshal response ...
2024/09/23 10:32:55 Ready to write response ...
2024/09/23 10:33:03 Ready to marshal response ...
2024/09/23 10:33:03 Ready to write response ...
2024/09/23 10:33:05 Ready to marshal response ...
2024/09/23 10:33:05 Ready to write response ...
2024/09/23 10:33:11 Ready to marshal response ...
2024/09/23 10:33:11 Ready to write response ...
2024/09/23 10:33:11 Ready to marshal response ...
2024/09/23 10:33:11 Ready to write response ...
2024/09/23 10:33:11 Ready to marshal response ...
2024/09/23 10:33:11 Ready to write response ...
2024/09/23 10:33:25 Ready to marshal response ...
2024/09/23 10:33:25 Ready to write response ...
2024/09/23 10:33:36 Ready to marshal response ...
2024/09/23 10:33:36 Ready to write response ...
2024/09/23 10:33:37 Ready to marshal response ...
2024/09/23 10:33:37 Ready to write response ...
==> kernel <==
10:34:06 up 16 min, 0 users, load average: 0.69, 0.44, 0.30
Linux addons-071702 5.15.0-1069-gcp #77~20.04.1-Ubuntu SMP Sun Sep 1 19:39:16 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [9e05497c00f2] <==
W0923 10:24:43.267833 1 cacher.go:171] Terminating all watchers from cacher queues.scheduling.volcano.sh
W0923 10:24:43.274570 1 cacher.go:171] Terminating all watchers from cacher podgroups.scheduling.volcano.sh
W0923 10:24:43.474842 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0923 10:24:43.766929 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0923 10:24:44.098154 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0923 10:33:03.257309 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0923 10:33:11.608625 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.99.228.81"}
I0923 10:33:20.349575 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0923 10:33:21.466076 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0923 10:33:25.789109 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0923 10:33:25.969294 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.98.226.140"}
I0923 10:33:37.472831 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.102.197.144"}
I0923 10:33:53.515901 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 10:33:53.515955 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 10:33:53.528086 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 10:33:53.528138 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 10:33:53.535323 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 10:33:53.535364 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 10:33:53.540878 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 10:33:53.540932 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0923 10:33:53.568808 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0923 10:33:53.568845 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0923 10:33:54.535568 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0923 10:33:54.569327 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
W0923 10:33:54.664783 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
==> kube-controller-manager [cb22d2aad924] <==
E0923 10:33:55.574909 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 10:33:55.924421 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:33:55.924467 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 10:33:56.037978 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:33:56.038015 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0923 10:33:57.387641 1 range_allocator.go:241] "Successfully synced" logger="node-ipam-controller" key="addons-071702"
W0923 10:33:57.473042 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:33:57.473088 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0923 10:33:58.028917 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0923 10:33:58.028946 1 shared_informer.go:320] Caches are synced for resource quota
W0923 10:33:58.248543 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:33:58.248582 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0923 10:33:58.459236 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0923 10:33:58.459275 1 shared_informer.go:320] Caches are synced for garbage collector
W0923 10:33:59.035029 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:33:59.035072 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 10:34:01.672564 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:34:01.672605 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 10:34:03.188866 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:34:03.188909 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0923 10:34:05.005340 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:34:05.005382 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0923 10:34:05.569373 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="10.518µs"
W0923 10:34:06.141056 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0923 10:34:06.141096 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
==> kube-proxy [20bcbfe91581] <==
I0923 10:21:30.663077 1 server_linux.go:66] "Using iptables proxy"
I0923 10:21:31.164609 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0923 10:21:31.164683 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0923 10:21:31.753356 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0923 10:21:31.753422 1 server_linux.go:169] "Using iptables Proxier"
I0923 10:21:31.759564 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0923 10:21:31.760036 1 server.go:483] "Version info" version="v1.31.1"
I0923 10:21:31.760057 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0923 10:21:31.761767 1 config.go:199] "Starting service config controller"
I0923 10:21:31.761795 1 shared_informer.go:313] Waiting for caches to sync for service config
I0923 10:21:31.761834 1 config.go:105] "Starting endpoint slice config controller"
I0923 10:21:31.761848 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0923 10:21:31.762407 1 config.go:328] "Starting node config controller"
I0923 10:21:31.762416 1 shared_informer.go:313] Waiting for caches to sync for node config
I0923 10:21:31.862027 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0923 10:21:31.862093 1 shared_informer.go:320] Caches are synced for service config
I0923 10:21:31.866339 1 shared_informer.go:320] Caches are synced for node config
==> kube-scheduler [9305922fd2cd] <==
E0923 10:21:20.854119 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
E0923 10:21:20.854121 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0923 10:21:20.854168 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0923 10:21:20.854185 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0923 10:21:20.854214 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0923 10:21:20.854232 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0923 10:21:20.854263 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0923 10:21:20.854290 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0923 10:21:20.853901 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0923 10:21:20.854265 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0923 10:21:20.854351 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
E0923 10:21:20.854388 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0923 10:21:20.854309 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0923 10:21:20.854427 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0923 10:21:21.658399 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0923 10:21:21.658436 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0923 10:21:21.660295 1 reflector.go:561] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0923 10:21:21.660323 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0923 10:21:21.672382 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0923 10:21:21.672410 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0923 10:21:21.701023 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0923 10:21:21.701059 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0923 10:21:21.705247 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0923 10:21:21.705279 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
I0923 10:21:24.475376 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 23 10:33:54 addons-071702 kubelet[2456]: I0923 10:33:54.589026 2456 scope.go:117] "RemoveContainer" containerID="cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1"
Sep 23 10:33:54 addons-071702 kubelet[2456]: I0923 10:33:54.603241 2456 scope.go:117] "RemoveContainer" containerID="cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1"
Sep 23 10:33:54 addons-071702 kubelet[2456]: E0923 10:33:54.603941 2456 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1" containerID="cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1"
Sep 23 10:33:54 addons-071702 kubelet[2456]: I0923 10:33:54.603971 2456 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1"} err="failed to get container status \"cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1\": rpc error: code = Unknown desc = Error response from daemon: No such container: cc06f4a274cf3542c8d279f74505145c961b003f0e22f454037d08bae50e18f1"
Sep 23 10:33:54 addons-071702 kubelet[2456]: I0923 10:33:54.603991 2456 scope.go:117] "RemoveContainer" containerID="421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe"
Sep 23 10:33:54 addons-071702 kubelet[2456]: I0923 10:33:54.614440 2456 scope.go:117] "RemoveContainer" containerID="421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe"
Sep 23 10:33:54 addons-071702 kubelet[2456]: E0923 10:33:54.615018 2456 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe" containerID="421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe"
Sep 23 10:33:54 addons-071702 kubelet[2456]: I0923 10:33:54.615054 2456 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe"} err="failed to get container status \"421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe\": rpc error: code = Unknown desc = Error response from daemon: No such container: 421c2c6c30227975648101ff3ba3e648f0c524f1c4e91dcd43f5ef08a42088fe"
Sep 23 10:33:55 addons-071702 kubelet[2456]: I0923 10:33:55.268031 2456 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="306aebc0-cf33-45f7-8669-d354b0ae713c" path="/var/lib/kubelet/pods/306aebc0-cf33-45f7-8669-d354b0ae713c/volumes"
Sep 23 10:33:55 addons-071702 kubelet[2456]: I0923 10:33:55.268529 2456 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f50a1f3-ec1f-4d33-8fa9-9379bfc44a79" path="/var/lib/kubelet/pods/6f50a1f3-ec1f-4d33-8fa9-9379bfc44a79/volumes"
Sep 23 10:34:00 addons-071702 kubelet[2456]: E0923 10:34:00.263113 2456 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="56edf1cc-2b21-496a-9452-47198379ea3f"
Sep 23 10:34:01 addons-071702 kubelet[2456]: E0923 10:34:01.263186 2456 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-test\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox\\\"\"" pod="default/registry-test" podUID="6b3b199e-5f82-4373-9012-03e4aeba41a7"
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.321838 2456 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/6b3b199e-5f82-4373-9012-03e4aeba41a7-gcp-creds\") pod \"6b3b199e-5f82-4373-9012-03e4aeba41a7\" (UID: \"6b3b199e-5f82-4373-9012-03e4aeba41a7\") "
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.321890 2456 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbx2p\" (UniqueName: \"kubernetes.io/projected/6b3b199e-5f82-4373-9012-03e4aeba41a7-kube-api-access-bbx2p\") pod \"6b3b199e-5f82-4373-9012-03e4aeba41a7\" (UID: \"6b3b199e-5f82-4373-9012-03e4aeba41a7\") "
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.321969 2456 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6b3b199e-5f82-4373-9012-03e4aeba41a7-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "6b3b199e-5f82-4373-9012-03e4aeba41a7" (UID: "6b3b199e-5f82-4373-9012-03e4aeba41a7"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.324243 2456 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b3b199e-5f82-4373-9012-03e4aeba41a7-kube-api-access-bbx2p" (OuterVolumeSpecName: "kube-api-access-bbx2p") pod "6b3b199e-5f82-4373-9012-03e4aeba41a7" (UID: "6b3b199e-5f82-4373-9012-03e4aeba41a7"). InnerVolumeSpecName "kube-api-access-bbx2p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.422087 2456 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/6b3b199e-5f82-4373-9012-03e4aeba41a7-gcp-creds\") on node \"addons-071702\" DevicePath \"\""
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.422117 2456 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-bbx2p\" (UniqueName: \"kubernetes.io/projected/6b3b199e-5f82-4373-9012-03e4aeba41a7-kube-api-access-bbx2p\") on node \"addons-071702\" DevicePath \"\""
Sep 23 10:34:05 addons-071702 kubelet[2456]: I0923 10:34:05.873229 2456 scope.go:117] "RemoveContainer" containerID="5d114050ac1c887356b180e39c42b27a6b7eec948223cab236e11378c8ec2b63"
Sep 23 10:34:06 addons-071702 kubelet[2456]: I0923 10:34:06.025337 2456 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p2b4\" (UniqueName: \"kubernetes.io/projected/8964449b-425a-4614-aa7b-d6cc98a185c7-kube-api-access-9p2b4\") pod \"8964449b-425a-4614-aa7b-d6cc98a185c7\" (UID: \"8964449b-425a-4614-aa7b-d6cc98a185c7\") "
Sep 23 10:34:06 addons-071702 kubelet[2456]: I0923 10:34:06.025389 2456 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wntlf\" (UniqueName: \"kubernetes.io/projected/42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe-kube-api-access-wntlf\") pod \"42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe\" (UID: \"42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe\") "
Sep 23 10:34:06 addons-071702 kubelet[2456]: I0923 10:34:06.027227 2456 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8964449b-425a-4614-aa7b-d6cc98a185c7-kube-api-access-9p2b4" (OuterVolumeSpecName: "kube-api-access-9p2b4") pod "8964449b-425a-4614-aa7b-d6cc98a185c7" (UID: "8964449b-425a-4614-aa7b-d6cc98a185c7"). InnerVolumeSpecName "kube-api-access-9p2b4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 23 10:34:06 addons-071702 kubelet[2456]: I0923 10:34:06.027300 2456 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe-kube-api-access-wntlf" (OuterVolumeSpecName: "kube-api-access-wntlf") pod "42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe" (UID: "42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe"). InnerVolumeSpecName "kube-api-access-wntlf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 23 10:34:06 addons-071702 kubelet[2456]: I0923 10:34:06.125614 2456 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-9p2b4\" (UniqueName: \"kubernetes.io/projected/8964449b-425a-4614-aa7b-d6cc98a185c7-kube-api-access-9p2b4\") on node \"addons-071702\" DevicePath \"\""
Sep 23 10:34:06 addons-071702 kubelet[2456]: I0923 10:34:06.125654 2456 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-wntlf\" (UniqueName: \"kubernetes.io/projected/42afc9e2-bf4a-4c0a-9db9-8f58e617fcbe-kube-api-access-wntlf\") on node \"addons-071702\" DevicePath \"\""
==> storage-provisioner [283a48bd8ff1] <==
I0923 10:21:36.055606 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0923 10:21:36.152850 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0923 10:21:36.152906 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0923 10:21:36.162733 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0923 10:21:36.164081 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-071702_3a8d257a-89b6-4281-b346-48781c7e48dc!
I0923 10:21:36.164921 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"01b33793-dc9e-4264-879d-944e72e95cff", APIVersion:"v1", ResourceVersion:"577", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-071702_3a8d257a-89b6-4281-b346-48781c7e48dc became leader
I0923 10:21:36.269929 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-071702_3a8d257a-89b6-4281-b346-48781c7e48dc!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-071702 -n addons-071702
helpers_test.go:261: (dbg) Run: kubectl --context addons-071702 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-071702 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-071702 describe pod busybox:
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Service Account: default
Node: addons-071702/192.168.49.2
Start Time: Mon, 23 Sep 2024 10:24:51 +0000
Labels: integration-test=busybox
Annotations: <none>
Status: Pending
IP: 10.244.0.27
IPs:
IP: 10.244.0.27
Containers:
busybox:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
Image ID:
Port: <none>
Host Port: <none>
Command:
sleep
3600
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment:
GOOGLE_APPLICATION_CREDENTIALS: /google-app-creds.json
PROJECT_ID: this_is_fake
GCP_PROJECT: this_is_fake
GCLOUD_PROJECT: this_is_fake
GOOGLE_CLOUD_PROJECT: this_is_fake
CLOUDSDK_CORE_PROJECT: this_is_fake
Mounts:
/google-app-creds.json from gcp-creds (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-7j7xd (ro)
Conditions:
Type Status
PodReadyToStartContainers True
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kube-api-access-7j7xd:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
gcp-creds:
Type: HostPath (bare host directory volume)
Path: /var/lib/minikube/google_application_credentials.json
HostPathType: File
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 9m16s default-scheduler Successfully assigned default/busybox to addons-071702
Normal Pulling 7m48s (x4 over 9m15s) kubelet Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
Warning Failed 7m48s (x4 over 9m15s) kubelet Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
Warning Failed 7m48s (x4 over 9m15s) kubelet Error: ErrImagePull
Warning Failed 7m35s (x6 over 9m15s) kubelet Error: ImagePullBackOff
Normal BackOff 4m14s (x21 over 9m15s) kubelet Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (73.43s)