=== RUN TestAddons/parallel/Ingress
=== PAUSE TestAddons/parallel/Ingress
=== CONT TestAddons/parallel/Ingress
addons_test.go:207: (dbg) Run: kubectl --context addons-122411 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s
addons_test.go:232: (dbg) Run: kubectl --context addons-122411 replace --force -f testdata/nginx-ingress-v1.yaml
addons_test.go:245: (dbg) Run: kubectl --context addons-122411 replace --force -f testdata/nginx-pod-svc.yaml
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ...
helpers_test.go:344: "nginx" [3740aa35-79da-4d05-8871-d98638231bbe] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx])
helpers_test.go:344: "nginx" [3740aa35-79da-4d05-8871-d98638231bbe] Running
addons_test.go:250: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 9.003339652s
addons_test.go:262: (dbg) Run: out/minikube-linux-arm64 -p addons-122411 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'"
addons_test.go:286: (dbg) Run: kubectl --context addons-122411 replace --force -f testdata/ingress-dns-example-v1.yaml
addons_test.go:291: (dbg) Run: out/minikube-linux-arm64 -p addons-122411 ip
addons_test.go:297: (dbg) Run: nslookup hello-john.test 192.168.49.2
addons_test.go:297: (dbg) Non-zero exit: nslookup hello-john.test 192.168.49.2: exit status 1 (15.06859893s)
-- stdout --
;; connection timed out; no servers could be reached
-- /stdout --
addons_test.go:299: failed to nslookup hello-john.test host. args "nslookup hello-john.test 192.168.49.2" : exit status 1
addons_test.go:303: unexpected output from nslookup. stdout: ;; connection timed out; no servers could be reached
stderr:
addons_test.go:306: (dbg) Run: out/minikube-linux-arm64 -p addons-122411 addons disable ingress-dns --alsologtostderr -v=1
addons_test.go:306: (dbg) Done: out/minikube-linux-arm64 -p addons-122411 addons disable ingress-dns --alsologtostderr -v=1: (1.550676035s)
addons_test.go:311: (dbg) Run: out/minikube-linux-arm64 -p addons-122411 addons disable ingress --alsologtostderr -v=1
addons_test.go:311: (dbg) Done: out/minikube-linux-arm64 -p addons-122411 addons disable ingress --alsologtostderr -v=1: (7.802419724s)
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Ingress]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-122411
helpers_test.go:235: (dbg) docker inspect addons-122411:
-- stdout --
[
{
"Id": "176ef05a369c7f222c83d39fff69434c357b6987b0bb949646512becd40d2cf1",
"Created": "2024-03-14T00:21:03.230132862Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 1965180,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-03-14T00:21:03.555504645Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:db62270b4bb0cfcde696782f7a6322baca275275e31814ce9fd8998407bf461e",
"ResolvConfPath": "/var/lib/docker/containers/176ef05a369c7f222c83d39fff69434c357b6987b0bb949646512becd40d2cf1/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/176ef05a369c7f222c83d39fff69434c357b6987b0bb949646512becd40d2cf1/hostname",
"HostsPath": "/var/lib/docker/containers/176ef05a369c7f222c83d39fff69434c357b6987b0bb949646512becd40d2cf1/hosts",
"LogPath": "/var/lib/docker/containers/176ef05a369c7f222c83d39fff69434c357b6987b0bb949646512becd40d2cf1/176ef05a369c7f222c83d39fff69434c357b6987b0bb949646512becd40d2cf1-json.log",
"Name": "/addons-122411",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-122411:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-122411",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/c8bdf5a27efba5a4997d4700b0518572d89c6d5a44e34063524bfeaf79ca3d07-init/diff:/var/lib/docker/overlay2/72e8565c3c6c9dcaff9dab92d595dc2eb0a265ce93caf6066e88703bac9975f6/diff",
"MergedDir": "/var/lib/docker/overlay2/c8bdf5a27efba5a4997d4700b0518572d89c6d5a44e34063524bfeaf79ca3d07/merged",
"UpperDir": "/var/lib/docker/overlay2/c8bdf5a27efba5a4997d4700b0518572d89c6d5a44e34063524bfeaf79ca3d07/diff",
"WorkDir": "/var/lib/docker/overlay2/c8bdf5a27efba5a4997d4700b0518572d89c6d5a44e34063524bfeaf79ca3d07/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-122411",
"Source": "/var/lib/docker/volumes/addons-122411/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-122411",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-122411",
"name.minikube.sigs.k8s.io": "addons-122411",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "d049a38919a06ce13ed0e4988e9299910a2fee7b311a9c0e1c0ff968b7a7b098",
"SandboxKey": "/var/run/docker/netns/d049a38919a0",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35041"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35040"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35037"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35039"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "35038"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-122411": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": [
"176ef05a369c",
"addons-122411"
],
"MacAddress": "02:42:c0:a8:31:02",
"NetworkID": "63eedb99593175e728ef3b639658b3fb3d5e886ced134b22ec30ab80d963032e",
"EndpointID": "5ffa29c93b7a54bb69294aa72065945930797a3c143433b026dc3bd00c5b9f61",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DriverOpts": null,
"DNSNames": [
"addons-122411",
"176ef05a369c"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-122411 -n addons-122411
helpers_test.go:244: <<< TestAddons/parallel/Ingress FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Ingress]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p addons-122411 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-122411 logs -n 25: (2.117984149s)
helpers_test.go:252: TestAddons/parallel/Ingress logs:
-- stdout --
==> Audit <==
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | -o=json --download-only | download-only-540583 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | |
| | -p download-only-540583 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.28.4 | | | | | |
| | --container-runtime=containerd | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | --all | minikube | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| delete | -p download-only-540583 | download-only-540583 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| start | -o=json --download-only | download-only-541047 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | |
| | -p download-only-541047 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.29.0-rc.2 | | | | | |
| | --container-runtime=containerd | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | --all | minikube | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| delete | -p download-only-541047 | download-only-541047 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| delete | -p download-only-455584 | download-only-455584 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| delete | -p download-only-540583 | download-only-540583 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| delete | -p download-only-541047 | download-only-541047 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| start | --download-only -p | download-docker-976036 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | |
| | download-docker-976036 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | -p download-docker-976036 | download-docker-976036 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| start | --download-only -p | binary-mirror-069067 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | |
| | binary-mirror-069067 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:39983 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| delete | -p binary-mirror-069067 | binary-mirror-069067 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:20 UTC |
| addons | enable dashboard -p | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | |
| | addons-122411 | | | | | |
| addons | disable dashboard -p | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | |
| | addons-122411 | | | | | |
| start | -p addons-122411 --wait=true | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:20 UTC | 14 Mar 24 00:22 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --driver=docker | | | | | |
| | --container-runtime=containerd | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| ip | addons-122411 ip | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:22 UTC | 14 Mar 24 00:22 UTC |
| addons | addons-122411 addons disable | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:22 UTC | 14 Mar 24 00:22 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-122411 addons | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | 14 Mar 24 00:23 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | 14 Mar 24 00:23 UTC |
| | addons-122411 | | | | | |
| ssh | addons-122411 ssh curl -s | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | 14 Mar 24 00:23 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-122411 ip | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | 14 Mar 24 00:23 UTC |
| addons | addons-122411 addons disable | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | 14 Mar 24 00:23 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-122411 addons disable | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | 14 Mar 24 00:23 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| addons | addons-122411 addons | addons-122411 | jenkins | v1.32.0 | 14 Mar 24 00:23 UTC | |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/03/14 00:20:39
Running on machine: ip-172-31-31-251
Binary: Built with gc go1.22.1 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0314 00:20:39.439654 1964718 out.go:291] Setting OutFile to fd 1 ...
I0314 00:20:39.439838 1964718 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0314 00:20:39.439850 1964718 out.go:304] Setting ErrFile to fd 2...
I0314 00:20:39.439855 1964718 out.go:338] TERM=,COLORTERM=, which probably does not support color
I0314 00:20:39.440131 1964718 root.go:338] Updating PATH: /home/jenkins/minikube-integration/18375-1958430/.minikube/bin
I0314 00:20:39.440654 1964718 out.go:298] Setting JSON to false
I0314 00:20:39.441556 1964718 start.go:129] hostinfo: {"hostname":"ip-172-31-31-251","uptime":28990,"bootTime":1710346650,"procs":166,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1055-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"982e3628-3742-4b3e-bb63-ac1b07660ec7"}
I0314 00:20:39.441629 1964718 start.go:139] virtualization:
I0314 00:20:39.443812 1964718 out.go:177] * [addons-122411] minikube v1.32.0 on Ubuntu 20.04 (arm64)
I0314 00:20:39.445681 1964718 out.go:177] - MINIKUBE_LOCATION=18375
I0314 00:20:39.447507 1964718 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0314 00:20:39.445860 1964718 notify.go:220] Checking for updates...
I0314 00:20:39.451532 1964718 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/18375-1958430/kubeconfig
I0314 00:20:39.453725 1964718 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/18375-1958430/.minikube
I0314 00:20:39.455904 1964718 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0314 00:20:39.457667 1964718 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0314 00:20:39.459775 1964718 driver.go:392] Setting default libvirt URI to qemu:///system
I0314 00:20:39.480126 1964718 docker.go:122] docker version: linux-25.0.4:Docker Engine - Community
I0314 00:20:39.480256 1964718 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0314 00:20:39.544145 1964718 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:49 SystemTime:2024-03-14 00:20:39.535313445 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1055-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:25.0.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.13.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.24.7]] Warnings:<nil>}}
I0314 00:20:39.544249 1964718 docker.go:295] overlay module found
I0314 00:20:39.546120 1964718 out.go:177] * Using the docker driver based on user configuration
I0314 00:20:39.547820 1964718 start.go:297] selected driver: docker
I0314 00:20:39.547837 1964718 start.go:901] validating driver "docker" against <nil>
I0314 00:20:39.547849 1964718 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0314 00:20:39.548497 1964718 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0314 00:20:39.601302 1964718 info.go:266] docker info: {ID:EOU5:DNGX:XN6V:L2FZ:UXRM:5TWK:EVUR:KC2F:GT7Z:Y4O4:GB77:5PD3 Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:28 OomKillDisable:true NGoroutines:49 SystemTime:2024-03-14 00:20:39.592656374 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1055-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8215040000 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-31-251 Labels:[] ExperimentalBuild:false ServerVersion:25.0.4 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:ae07eda36dd25f8a1b98dfbf587313b99c0190bb Expected:ae07eda36dd25f8a1b98dfbf587313b99c0190bb} RuncCommit:{ID:v1.1.12-0-g51d5e94 Expected:v1.1.12-0-g51d5e94} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.13.0] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.24.7]] Warnings:<nil>}}
I0314 00:20:39.601482 1964718 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0314 00:20:39.601717 1964718 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0314 00:20:39.603526 1964718 out.go:177] * Using Docker driver with root privileges
I0314 00:20:39.605681 1964718 cni.go:84] Creating CNI manager for ""
I0314 00:20:39.605701 1964718 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0314 00:20:39.605711 1964718 start_flags.go:319] Found "CNI" CNI - setting NetworkPlugin=cni
I0314 00:20:39.605815 1964718 start.go:340] cluster config:
{Name:addons-122411 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-122411 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0314 00:20:39.608739 1964718 out.go:177] * Starting "addons-122411" primary control-plane node in "addons-122411" cluster
I0314 00:20:39.610383 1964718 cache.go:121] Beginning downloading kic base image for docker with containerd
I0314 00:20:39.612431 1964718 out.go:177] * Pulling base image v0.0.42-1710284843-18375 ...
I0314 00:20:39.614095 1964718 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
I0314 00:20:39.614126 1964718 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f in local docker daemon
I0314 00:20:39.614149 1964718 preload.go:147] Found local preload: /home/jenkins/minikube-integration/18375-1958430/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4
I0314 00:20:39.614158 1964718 cache.go:56] Caching tarball of preloaded images
I0314 00:20:39.614257 1964718 preload.go:173] Found /home/jenkins/minikube-integration/18375-1958430/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4 in cache, skipping download
I0314 00:20:39.614268 1964718 cache.go:59] Finished verifying existence of preloaded tar for v1.28.4 on containerd
I0314 00:20:39.614617 1964718 profile.go:142] Saving config to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/config.json ...
I0314 00:20:39.614640 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/config.json: {Name:mk02168d44307eb6068e4761362f457ab1ba607d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:20:39.630213 1964718 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f to local cache
I0314 00:20:39.630348 1964718 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f in local cache directory
I0314 00:20:39.630371 1964718 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f in local cache directory, skipping pull
I0314 00:20:39.630377 1964718 image.go:105] gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f exists in cache, skipping pull
I0314 00:20:39.630385 1964718 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f as a tarball
I0314 00:20:39.630398 1964718 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f from local cache
I0314 00:20:55.851630 1964718 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f from cached tarball
I0314 00:20:55.851671 1964718 cache.go:194] Successfully downloaded all kic artifacts
I0314 00:20:55.851714 1964718 start.go:360] acquireMachinesLock for addons-122411: {Name:mk319905a0730f62c640d4a155e69f9369879bc3 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0314 00:20:55.851864 1964718 start.go:364] duration metric: took 124.98µs to acquireMachinesLock for "addons-122411"
I0314 00:20:55.851899 1964718 start.go:93] Provisioning new machine with config: &{Name:addons-122411 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-122411 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0314 00:20:55.851981 1964718 start.go:125] createHost starting for "" (driver="docker")
I0314 00:20:55.854177 1964718 out.go:204] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0314 00:20:55.854419 1964718 start.go:159] libmachine.API.Create for "addons-122411" (driver="docker")
I0314 00:20:55.854455 1964718 client.go:168] LocalClient.Create starting
I0314 00:20:55.854572 1964718 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca.pem
I0314 00:20:56.269665 1964718 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/cert.pem
I0314 00:20:56.489561 1964718 cli_runner.go:164] Run: docker network inspect addons-122411 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0314 00:20:56.504976 1964718 cli_runner.go:211] docker network inspect addons-122411 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0314 00:20:56.505076 1964718 network_create.go:281] running [docker network inspect addons-122411] to gather additional debugging logs...
I0314 00:20:56.505097 1964718 cli_runner.go:164] Run: docker network inspect addons-122411
W0314 00:20:56.519684 1964718 cli_runner.go:211] docker network inspect addons-122411 returned with exit code 1
I0314 00:20:56.519718 1964718 network_create.go:284] error running [docker network inspect addons-122411]: docker network inspect addons-122411: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-122411 not found
I0314 00:20:56.519734 1964718 network_create.go:286] output of [docker network inspect addons-122411]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-122411 not found
** /stderr **
I0314 00:20:56.519829 1964718 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0314 00:20:56.536914 1964718 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x400256bff0}
I0314 00:20:56.536950 1964718 network_create.go:124] attempt to create docker network addons-122411 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0314 00:20:56.537009 1964718 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-122411 addons-122411
I0314 00:20:56.598219 1964718 network_create.go:108] docker network addons-122411 192.168.49.0/24 created
I0314 00:20:56.598252 1964718 kic.go:121] calculated static IP "192.168.49.2" for the "addons-122411" container
I0314 00:20:56.598344 1964718 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0314 00:20:56.612420 1964718 cli_runner.go:164] Run: docker volume create addons-122411 --label name.minikube.sigs.k8s.io=addons-122411 --label created_by.minikube.sigs.k8s.io=true
I0314 00:20:56.628706 1964718 oci.go:103] Successfully created a docker volume addons-122411
I0314 00:20:56.628787 1964718 cli_runner.go:164] Run: docker run --rm --name addons-122411-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-122411 --entrypoint /usr/bin/test -v addons-122411:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f -d /var/lib
I0314 00:20:58.665986 1964718 cli_runner.go:217] Completed: docker run --rm --name addons-122411-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-122411 --entrypoint /usr/bin/test -v addons-122411:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f -d /var/lib: (2.037153702s)
I0314 00:20:58.666023 1964718 oci.go:107] Successfully prepared a docker volume addons-122411
I0314 00:20:58.666058 1964718 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
I0314 00:20:58.666079 1964718 kic.go:194] Starting extracting preloaded images to volume ...
I0314 00:20:58.666152 1964718 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/18375-1958430/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-122411:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f -I lz4 -xf /preloaded.tar -C /extractDir
I0314 00:21:03.161685 1964718 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/18375-1958430/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.4-containerd-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-122411:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f -I lz4 -xf /preloaded.tar -C /extractDir: (4.495493894s)
I0314 00:21:03.161719 1964718 kic.go:203] duration metric: took 4.495635866s to extract preloaded images to volume ...
W0314 00:21:03.161861 1964718 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0314 00:21:03.161986 1964718 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0314 00:21:03.212688 1964718 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-122411 --name addons-122411 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-122411 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-122411 --network addons-122411 --ip 192.168.49.2 --volume addons-122411:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f
I0314 00:21:03.566159 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Running}}
I0314 00:21:03.587361 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:03.614924 1964718 cli_runner.go:164] Run: docker exec addons-122411 stat /var/lib/dpkg/alternatives/iptables
I0314 00:21:03.669549 1964718 oci.go:144] the created container "addons-122411" has a running status.
I0314 00:21:03.669577 1964718 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa...
I0314 00:21:04.370991 1964718 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0314 00:21:04.406523 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:04.428788 1964718 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0314 00:21:04.428808 1964718 kic_runner.go:114] Args: [docker exec --privileged addons-122411 chown docker:docker /home/docker/.ssh/authorized_keys]
I0314 00:21:04.490950 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:04.509667 1964718 machine.go:94] provisionDockerMachine start ...
I0314 00:21:04.509758 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:04.529765 1964718 main.go:141] libmachine: Using SSH client type: native
I0314 00:21:04.530013 1964718 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e1e00] 0x3e4660 <nil> [] 0s} 127.0.0.1 35041 <nil> <nil>}
I0314 00:21:04.530022 1964718 main.go:141] libmachine: About to run SSH command:
hostname
I0314 00:21:04.670868 1964718 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-122411
I0314 00:21:04.670895 1964718 ubuntu.go:169] provisioning hostname "addons-122411"
I0314 00:21:04.670966 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:04.691129 1964718 main.go:141] libmachine: Using SSH client type: native
I0314 00:21:04.691391 1964718 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e1e00] 0x3e4660 <nil> [] 0s} 127.0.0.1 35041 <nil> <nil>}
I0314 00:21:04.691403 1964718 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-122411 && echo "addons-122411" | sudo tee /etc/hostname
I0314 00:21:04.846854 1964718 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-122411
I0314 00:21:04.846944 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:04.862893 1964718 main.go:141] libmachine: Using SSH client type: native
I0314 00:21:04.863173 1964718 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x3e1e00] 0x3e4660 <nil> [] 0s} 127.0.0.1 35041 <nil> <nil>}
I0314 00:21:04.863195 1964718 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-122411' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-122411/g' /etc/hosts;
else
echo '127.0.1.1 addons-122411' | sudo tee -a /etc/hosts;
fi
fi
I0314 00:21:05.016038 1964718 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0314 00:21:05.016117 1964718 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/18375-1958430/.minikube CaCertPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/18375-1958430/.minikube}
I0314 00:21:05.016156 1964718 ubuntu.go:177] setting up certificates
I0314 00:21:05.016168 1964718 provision.go:84] configureAuth start
I0314 00:21:05.016238 1964718 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-122411
I0314 00:21:05.033590 1964718 provision.go:143] copyHostCerts
I0314 00:21:05.033688 1964718 exec_runner.go:151] cp: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/18375-1958430/.minikube/cert.pem (1123 bytes)
I0314 00:21:05.033854 1964718 exec_runner.go:151] cp: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/18375-1958430/.minikube/key.pem (1675 bytes)
I0314 00:21:05.033926 1964718 exec_runner.go:151] cp: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.pem (1078 bytes)
I0314 00:21:05.033991 1964718 provision.go:117] generating server cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca-key.pem org=jenkins.addons-122411 san=[127.0.0.1 192.168.49.2 addons-122411 localhost minikube]
I0314 00:21:05.430048 1964718 provision.go:177] copyRemoteCerts
I0314 00:21:05.430122 1964718 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0314 00:21:05.430162 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:05.449939 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:05.548303 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0314 00:21:05.572615 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0314 00:21:05.596350 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0314 00:21:05.621503 1964718 provision.go:87] duration metric: took 605.321349ms to configureAuth
I0314 00:21:05.621532 1964718 ubuntu.go:193] setting minikube options for container-runtime
I0314 00:21:05.621722 1964718 config.go:182] Loaded profile config "addons-122411": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I0314 00:21:05.621744 1964718 machine.go:97] duration metric: took 1.112052046s to provisionDockerMachine
I0314 00:21:05.621751 1964718 client.go:171] duration metric: took 9.767287281s to LocalClient.Create
I0314 00:21:05.621774 1964718 start.go:167] duration metric: took 9.767356204s to libmachine.API.Create "addons-122411"
I0314 00:21:05.621785 1964718 start.go:293] postStartSetup for "addons-122411" (driver="docker")
I0314 00:21:05.621795 1964718 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0314 00:21:05.621858 1964718 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0314 00:21:05.621911 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:05.638041 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:05.740553 1964718 ssh_runner.go:195] Run: cat /etc/os-release
I0314 00:21:05.743792 1964718 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0314 00:21:05.743828 1964718 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0314 00:21:05.743858 1964718 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0314 00:21:05.743867 1964718 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0314 00:21:05.743877 1964718 filesync.go:126] Scanning /home/jenkins/minikube-integration/18375-1958430/.minikube/addons for local assets ...
I0314 00:21:05.743954 1964718 filesync.go:126] Scanning /home/jenkins/minikube-integration/18375-1958430/.minikube/files for local assets ...
I0314 00:21:05.743985 1964718 start.go:296] duration metric: took 122.194455ms for postStartSetup
I0314 00:21:05.744299 1964718 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-122411
I0314 00:21:05.759833 1964718 profile.go:142] Saving config to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/config.json ...
I0314 00:21:05.760130 1964718 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0314 00:21:05.760195 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:05.775611 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:05.871856 1964718 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0314 00:21:05.876211 1964718 start.go:128] duration metric: took 10.024212959s to createHost
I0314 00:21:05.876239 1964718 start.go:83] releasing machines lock for "addons-122411", held for 10.024356261s
I0314 00:21:05.876334 1964718 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-122411
I0314 00:21:05.892401 1964718 ssh_runner.go:195] Run: cat /version.json
I0314 00:21:05.892426 1964718 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0314 00:21:05.892460 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:05.892509 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:05.913974 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:05.915169 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:06.125947 1964718 ssh_runner.go:195] Run: systemctl --version
I0314 00:21:06.130672 1964718 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0314 00:21:06.135129 1964718 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0314 00:21:06.161302 1964718 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0314 00:21:06.161381 1964718 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0314 00:21:06.191318 1964718 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0314 00:21:06.191340 1964718 start.go:494] detecting cgroup driver to use...
I0314 00:21:06.191406 1964718 detect.go:196] detected "cgroupfs" cgroup driver on host os
I0314 00:21:06.191481 1964718 ssh_runner.go:195] Run: sudo systemctl stop -f crio
I0314 00:21:06.204090 1964718 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0314 00:21:06.215808 1964718 docker.go:217] disabling cri-docker service (if available) ...
I0314 00:21:06.215895 1964718 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket
I0314 00:21:06.229770 1964718 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service
I0314 00:21:06.245484 1964718 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket
I0314 00:21:06.330138 1964718 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service
I0314 00:21:06.421801 1964718 docker.go:233] disabling docker service ...
I0314 00:21:06.421879 1964718 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket
I0314 00:21:06.441280 1964718 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service
I0314 00:21:06.453178 1964718 ssh_runner.go:195] Run: sudo systemctl disable docker.socket
I0314 00:21:06.534792 1964718 ssh_runner.go:195] Run: sudo systemctl mask docker.service
I0314 00:21:06.625563 1964718 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker
I0314 00:21:06.636627 1964718 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0314 00:21:06.653295 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml"
I0314 00:21:06.664274 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0314 00:21:06.674861 1964718 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0314 00:21:06.674969 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0314 00:21:06.685664 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0314 00:21:06.696113 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0314 00:21:06.705870 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0314 00:21:06.716431 1964718 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0314 00:21:06.725348 1964718 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0314 00:21:06.734957 1964718 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0314 00:21:06.743627 1964718 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0314 00:21:06.752242 1964718 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0314 00:21:06.844979 1964718 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0314 00:21:06.974874 1964718 start.go:541] Will wait 60s for socket path /run/containerd/containerd.sock
I0314 00:21:06.975010 1964718 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock
I0314 00:21:06.978629 1964718 start.go:562] Will wait 60s for crictl version
I0314 00:21:06.978724 1964718 ssh_runner.go:195] Run: which crictl
I0314 00:21:06.982128 1964718 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0314 00:21:07.023598 1964718 start.go:578] Version: 0.1.0
RuntimeName: containerd
RuntimeVersion: 1.6.28
RuntimeApiVersion: v1
I0314 00:21:07.023718 1964718 ssh_runner.go:195] Run: containerd --version
I0314 00:21:07.047573 1964718 ssh_runner.go:195] Run: containerd --version
I0314 00:21:07.072566 1964718 out.go:177] * Preparing Kubernetes v1.28.4 on containerd 1.6.28 ...
I0314 00:21:07.074666 1964718 cli_runner.go:164] Run: docker network inspect addons-122411 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0314 00:21:07.089734 1964718 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0314 00:21:07.093528 1964718 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0314 00:21:07.104622 1964718 kubeadm.go:877] updating cluster {Name:addons-122411 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-122411 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0314 00:21:07.104748 1964718 preload.go:132] Checking if preload exists for k8s version v1.28.4 and runtime containerd
I0314 00:21:07.104819 1964718 ssh_runner.go:195] Run: sudo crictl images --output json
I0314 00:21:07.146089 1964718 containerd.go:612] all images are preloaded for containerd runtime.
I0314 00:21:07.146115 1964718 containerd.go:519] Images already preloaded, skipping extraction
I0314 00:21:07.146206 1964718 ssh_runner.go:195] Run: sudo crictl images --output json
I0314 00:21:07.182275 1964718 containerd.go:612] all images are preloaded for containerd runtime.
I0314 00:21:07.182298 1964718 cache_images.go:84] Images are preloaded, skipping loading
I0314 00:21:07.182306 1964718 kubeadm.go:928] updating node { 192.168.49.2 8443 v1.28.4 containerd true true} ...
I0314 00:21:07.182412 1964718 kubeadm.go:940] kubelet [Unit]
Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.28.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-122411 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.28.4 ClusterName:addons-122411 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0314 00:21:07.182486 1964718 ssh_runner.go:195] Run: sudo crictl info
I0314 00:21:07.218396 1964718 cni.go:84] Creating CNI manager for ""
I0314 00:21:07.218423 1964718 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0314 00:21:07.218432 1964718 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0314 00:21:07.218486 1964718 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.28.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-122411 NodeName:addons-122411 DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///run/containerd/containerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0314 00:21:07.218657 1964718 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///run/containerd/containerd.sock
name: "addons-122411"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.28.4
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
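Aside on the config dump above: the assembled file is copied to /var/tmp/minikube/kubeadm.yaml a few lines below and later handed to kubeadm init. A config like this can also be sanity-checked with the pinned kubeadm binary before init; this is a sketch, not something minikube does here, and it assumes the v1.28 kubeadm in this run ships the `config validate` subcommand added in recent releases:
  sudo /var/lib/minikube/binaries/v1.28.4/kubeadm config validate --config /var/tmp/minikube/kubeadm.yaml   # flags unknown or invalid fields
  sudo /var/lib/minikube/binaries/v1.28.4/kubeadm config print init-defaults                                # built-in defaults, for comparison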
I0314 00:21:07.218738 1964718 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.28.4
I0314 00:21:07.227391 1964718 binaries.go:44] Found k8s binaries, skipping transfer
I0314 00:21:07.227467 1964718 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0314 00:21:07.236041 1964718 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (317 bytes)
I0314 00:21:07.255161 1964718 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0314 00:21:07.273098 1964718 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2167 bytes)
I0314 00:21:07.291409 1964718 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0314 00:21:07.295167 1964718 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0314 00:21:07.306057 1964718 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0314 00:21:07.395887 1964718 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0314 00:21:07.416191 1964718 certs.go:68] Setting up /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411 for IP: 192.168.49.2
I0314 00:21:07.416266 1964718 certs.go:194] generating shared ca certs ...
I0314 00:21:07.416296 1964718 certs.go:226] acquiring lock for ca certs: {Name:mka77573162012513ec65b9398fcff30bed9742a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:07.416851 1964718 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.key
I0314 00:21:07.913061 1964718 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.crt ...
I0314 00:21:07.913091 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.crt: {Name:mk9617ca7b421c9e0f4ae545b30ab46dcf93e5e9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:07.913319 1964718 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.key ...
I0314 00:21:07.913334 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.key: {Name:mk728dfa98dba73d9d122892bf9cd7dba060e6cd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:07.913493 1964718 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.key
I0314 00:21:08.834109 1964718 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.crt ...
I0314 00:21:08.834141 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.crt: {Name:mk711ffb2c559c41ba96100f40957a791aad228b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:08.834335 1964718 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.key ...
I0314 00:21:08.834348 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.key: {Name:mk36e88d5ffb4c3e3fbed3cae42396a7e18897e8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:08.834429 1964718 certs.go:256] generating profile certs ...
I0314 00:21:08.834490 1964718 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/client.key
I0314 00:21:08.834506 1964718 crypto.go:68] Generating cert /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/client.crt with IP's: []
I0314 00:21:09.214268 1964718 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/client.crt ...
I0314 00:21:09.214300 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/client.crt: {Name:mk90963949c35f323f6ba09bc2a6fe76b783f003 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:09.214593 1964718 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/client.key ...
I0314 00:21:09.214613 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/client.key: {Name:mkb343a069b8a709c63a2f0ad3c91a7c54502c85 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:09.214729 1964718 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.key.d086d0a7
I0314 00:21:09.214757 1964718 crypto.go:68] Generating cert /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.crt.d086d0a7 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0314 00:21:09.771877 1964718 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.crt.d086d0a7 ...
I0314 00:21:09.771948 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.crt.d086d0a7: {Name:mk4163dc53db6e61bde0a2aaeebc0a5764adc32b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:09.772757 1964718 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.key.d086d0a7 ...
I0314 00:21:09.772774 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.key.d086d0a7: {Name:mk878e782e54fe6cd1cfa78e83f9587e99ec19bc Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:09.773353 1964718 certs.go:381] copying /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.crt.d086d0a7 -> /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.crt
I0314 00:21:09.773479 1964718 certs.go:385] copying /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.key.d086d0a7 -> /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.key
I0314 00:21:09.773537 1964718 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.key
I0314 00:21:09.773559 1964718 crypto.go:68] Generating cert /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.crt with IP's: []
I0314 00:21:10.169281 1964718 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.crt ...
I0314 00:21:10.169316 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.crt: {Name:mk7e508824249aa5b49fe9f3ccafee17d9a555e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:10.169510 1964718 crypto.go:164] Writing key to /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.key ...
I0314 00:21:10.169525 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.key: {Name:mk040fc6bc0f7930ba905b638829c84a9072ebfd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:10.170098 1964718 certs.go:484] found cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca-key.pem (1679 bytes)
I0314 00:21:10.170144 1964718 certs.go:484] found cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/ca.pem (1078 bytes)
I0314 00:21:10.170182 1964718 certs.go:484] found cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/cert.pem (1123 bytes)
I0314 00:21:10.170210 1964718 certs.go:484] found cert: /home/jenkins/minikube-integration/18375-1958430/.minikube/certs/key.pem (1675 bytes)
I0314 00:21:10.170880 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0314 00:21:10.197460 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0314 00:21:10.224075 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0314 00:21:10.249068 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0314 00:21:10.274323 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0314 00:21:10.298588 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0314 00:21:10.323086 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0314 00:21:10.347064 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/profiles/addons-122411/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes)
I0314 00:21:10.370965 1964718 ssh_runner.go:362] scp /home/jenkins/minikube-integration/18375-1958430/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0314 00:21:10.395321 1964718 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0314 00:21:10.413782 1964718 ssh_runner.go:195] Run: openssl version
I0314 00:21:10.419651 1964718 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0314 00:21:10.429050 1964718 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0314 00:21:10.432417 1964718 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Mar 14 00:21 /usr/share/ca-certificates/minikubeCA.pem
I0314 00:21:10.432490 1964718 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0314 00:21:10.439479 1964718 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
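The two steps above are the standard OpenSSL "c_rehash" idiom: `openssl x509 -hash -noout` prints the subject-name hash of the CA certificate, and that hash plus a ".0" suffix becomes the symlink name that OpenSSL-based clients look up under /etc/ssl/certs. A minimal sketch of the same idiom (b5213941 is simply whatever the first command prints for this CA):
  openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem   # -> b5213941
  sudo ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0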
I0314 00:21:10.448722 1964718 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0314 00:21:10.451925 1964718 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0314 00:21:10.451998 1964718 kubeadm.go:391] StartCluster: {Name:addons-122411 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.42-1710284843-18375@sha256:d67c38c9fc2ad14c48d95e17cbac49314325db5758d8f7b3de60b927e62ce94f Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.4 ClusterName:addons-122411 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0314 00:21:10.452086 1964718 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]}
I0314 00:21:10.452168 1964718 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system"
I0314 00:21:10.493696 1964718 cri.go:89] found id: ""
I0314 00:21:10.493778 1964718 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0314 00:21:10.506260 1964718 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0314 00:21:10.515912 1964718 kubeadm.go:213] ignoring SystemVerification for kubeadm because of docker driver
I0314 00:21:10.515992 1964718 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0314 00:21:10.528049 1964718 kubeadm.go:154] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0314 00:21:10.528070 1964718 kubeadm.go:156] found existing configuration files:
I0314 00:21:10.528127 1964718 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0314 00:21:10.538230 1964718 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0314 00:21:10.538316 1964718 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0314 00:21:10.547526 1964718 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0314 00:21:10.557785 1964718 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0314 00:21:10.557869 1964718 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0314 00:21:10.566557 1964718 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0314 00:21:10.575893 1964718 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0314 00:21:10.575957 1964718 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0314 00:21:10.585058 1964718 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0314 00:21:10.594178 1964718 kubeadm.go:162] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0314 00:21:10.594253 1964718 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0314 00:21:10.602962 1964718 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.28.4:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0314 00:21:10.648826 1964718 kubeadm.go:309] [init] Using Kubernetes version: v1.28.4
I0314 00:21:10.649039 1964718 kubeadm.go:309] [preflight] Running pre-flight checks
I0314 00:21:10.689230 1964718 kubeadm.go:309] [preflight] The system verification failed. Printing the output from the verification:
I0314 00:21:10.689305 1964718 kubeadm.go:309] KERNEL_VERSION: 5.15.0-1055-aws
I0314 00:21:10.689343 1964718 kubeadm.go:309] OS: Linux
I0314 00:21:10.689392 1964718 kubeadm.go:309] CGROUPS_CPU: enabled
I0314 00:21:10.689460 1964718 kubeadm.go:309] CGROUPS_CPUACCT: enabled
I0314 00:21:10.689508 1964718 kubeadm.go:309] CGROUPS_CPUSET: enabled
I0314 00:21:10.689558 1964718 kubeadm.go:309] CGROUPS_DEVICES: enabled
I0314 00:21:10.689608 1964718 kubeadm.go:309] CGROUPS_FREEZER: enabled
I0314 00:21:10.689658 1964718 kubeadm.go:309] CGROUPS_MEMORY: enabled
I0314 00:21:10.689706 1964718 kubeadm.go:309] CGROUPS_PIDS: enabled
I0314 00:21:10.689759 1964718 kubeadm.go:309] CGROUPS_HUGETLB: enabled
I0314 00:21:10.689807 1964718 kubeadm.go:309] CGROUPS_BLKIO: enabled
I0314 00:21:10.765275 1964718 kubeadm.go:309] [preflight] Pulling images required for setting up a Kubernetes cluster
I0314 00:21:10.765434 1964718 kubeadm.go:309] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0314 00:21:10.765542 1964718 kubeadm.go:309] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0314 00:21:10.986714 1964718 kubeadm.go:309] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0314 00:21:10.990549 1964718 out.go:204] - Generating certificates and keys ...
I0314 00:21:10.990721 1964718 kubeadm.go:309] [certs] Using existing ca certificate authority
I0314 00:21:10.990835 1964718 kubeadm.go:309] [certs] Using existing apiserver certificate and key on disk
I0314 00:21:11.603556 1964718 kubeadm.go:309] [certs] Generating "apiserver-kubelet-client" certificate and key
I0314 00:21:11.874177 1964718 kubeadm.go:309] [certs] Generating "front-proxy-ca" certificate and key
I0314 00:21:12.373310 1964718 kubeadm.go:309] [certs] Generating "front-proxy-client" certificate and key
I0314 00:21:13.180291 1964718 kubeadm.go:309] [certs] Generating "etcd/ca" certificate and key
I0314 00:21:13.462152 1964718 kubeadm.go:309] [certs] Generating "etcd/server" certificate and key
I0314 00:21:13.462449 1964718 kubeadm.go:309] [certs] etcd/server serving cert is signed for DNS names [addons-122411 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0314 00:21:13.862723 1964718 kubeadm.go:309] [certs] Generating "etcd/peer" certificate and key
I0314 00:21:13.863034 1964718 kubeadm.go:309] [certs] etcd/peer serving cert is signed for DNS names [addons-122411 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0314 00:21:14.748039 1964718 kubeadm.go:309] [certs] Generating "etcd/healthcheck-client" certificate and key
I0314 00:21:15.581971 1964718 kubeadm.go:309] [certs] Generating "apiserver-etcd-client" certificate and key
I0314 00:21:15.850198 1964718 kubeadm.go:309] [certs] Generating "sa" key and public key
I0314 00:21:15.850481 1964718 kubeadm.go:309] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0314 00:21:16.039583 1964718 kubeadm.go:309] [kubeconfig] Writing "admin.conf" kubeconfig file
I0314 00:21:16.590513 1964718 kubeadm.go:309] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0314 00:21:16.912211 1964718 kubeadm.go:309] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0314 00:21:17.184871 1964718 kubeadm.go:309] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0314 00:21:17.185486 1964718 kubeadm.go:309] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0314 00:21:17.188252 1964718 kubeadm.go:309] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0314 00:21:17.191015 1964718 out.go:204] - Booting up control plane ...
I0314 00:21:17.191118 1964718 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0314 00:21:17.191193 1964718 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0314 00:21:17.193476 1964718 kubeadm.go:309] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0314 00:21:17.208928 1964718 kubeadm.go:309] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0314 00:21:17.209024 1964718 kubeadm.go:309] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0314 00:21:17.209063 1964718 kubeadm.go:309] [kubelet-start] Starting the kubelet
I0314 00:21:17.315697 1964718 kubeadm.go:309] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I0314 00:21:23.810193 1964718 kubeadm.go:309] [apiclient] All control plane components are healthy after 6.502110 seconds
I0314 00:21:23.810314 1964718 kubeadm.go:309] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0314 00:21:23.825572 1964718 kubeadm.go:309] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0314 00:21:24.351174 1964718 kubeadm.go:309] [upload-certs] Skipping phase. Please see --upload-certs
I0314 00:21:24.351388 1964718 kubeadm.go:309] [mark-control-plane] Marking the node addons-122411 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0314 00:21:24.863650 1964718 kubeadm.go:309] [bootstrap-token] Using token: d0efvz.p5fbzk0vpfz99a1c
I0314 00:21:24.865747 1964718 out.go:204] - Configuring RBAC rules ...
I0314 00:21:24.865884 1964718 kubeadm.go:309] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0314 00:21:24.875024 1964718 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0314 00:21:24.883188 1964718 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0314 00:21:24.887327 1964718 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0314 00:21:24.891491 1964718 kubeadm.go:309] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0314 00:21:24.895778 1964718 kubeadm.go:309] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0314 00:21:24.909632 1964718 kubeadm.go:309] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0314 00:21:25.137952 1964718 kubeadm.go:309] [addons] Applied essential addon: CoreDNS
I0314 00:21:25.283878 1964718 kubeadm.go:309] [addons] Applied essential addon: kube-proxy
I0314 00:21:25.286110 1964718 kubeadm.go:309]
I0314 00:21:25.286188 1964718 kubeadm.go:309] Your Kubernetes control-plane has initialized successfully!
I0314 00:21:25.286199 1964718 kubeadm.go:309]
I0314 00:21:25.286274 1964718 kubeadm.go:309] To start using your cluster, you need to run the following as a regular user:
I0314 00:21:25.286283 1964718 kubeadm.go:309]
I0314 00:21:25.286307 1964718 kubeadm.go:309] mkdir -p $HOME/.kube
I0314 00:21:25.286368 1964718 kubeadm.go:309] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0314 00:21:25.286420 1964718 kubeadm.go:309] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0314 00:21:25.286428 1964718 kubeadm.go:309]
I0314 00:21:25.286480 1964718 kubeadm.go:309] Alternatively, if you are the root user, you can run:
I0314 00:21:25.286488 1964718 kubeadm.go:309]
I0314 00:21:25.286551 1964718 kubeadm.go:309] export KUBECONFIG=/etc/kubernetes/admin.conf
I0314 00:21:25.286558 1964718 kubeadm.go:309]
I0314 00:21:25.286609 1964718 kubeadm.go:309] You should now deploy a pod network to the cluster.
I0314 00:21:25.286685 1964718 kubeadm.go:309] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0314 00:21:25.286754 1964718 kubeadm.go:309] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0314 00:21:25.286763 1964718 kubeadm.go:309]
I0314 00:21:25.286844 1964718 kubeadm.go:309] You can now join any number of control-plane nodes by copying certificate authorities
I0314 00:21:25.286921 1964718 kubeadm.go:309] and service account keys on each node and then running the following as root:
I0314 00:21:25.286929 1964718 kubeadm.go:309]
I0314 00:21:25.287009 1964718 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token d0efvz.p5fbzk0vpfz99a1c \
I0314 00:21:25.287111 1964718 kubeadm.go:309] --discovery-token-ca-cert-hash sha256:644f2669e4ceaaa79da07ee7b0c25bc89ffedad1f907d006eecbfc00d6f5ae7a \
I0314 00:21:25.287135 1964718 kubeadm.go:309] --control-plane
I0314 00:21:25.287139 1964718 kubeadm.go:309]
I0314 00:21:25.287233 1964718 kubeadm.go:309] Then you can join any number of worker nodes by running the following on each as root:
I0314 00:21:25.287238 1964718 kubeadm.go:309]
I0314 00:21:25.287316 1964718 kubeadm.go:309] kubeadm join control-plane.minikube.internal:8443 --token d0efvz.p5fbzk0vpfz99a1c \
I0314 00:21:25.287413 1964718 kubeadm.go:309] --discovery-token-ca-cert-hash sha256:644f2669e4ceaaa79da07ee7b0c25bc89ffedad1f907d006eecbfc00d6f5ae7a
I0314 00:21:25.289868 1964718 kubeadm.go:309] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1055-aws\n", err: exit status 1
I0314 00:21:25.289990 1964718 kubeadm.go:309] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
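For reference, the --discovery-token-ca-cert-hash printed in the join commands above is the SHA-256 of the cluster CA's public key, so it can be recomputed from the CA certificate. A sketch using the upstream kubeadm recipe, assuming the CA is the one under the certificatesDir from this run (/var/lib/minikube/certs) and is an RSA key:
  openssl x509 -pubkey -in /var/lib/minikube/certs/ca.crt \
    | openssl rsa -pubin -outform der 2>/dev/null \
    | openssl dgst -sha256 -hex | sed 's/^.* //'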
I0314 00:21:25.290067 1964718 cni.go:84] Creating CNI manager for ""
I0314 00:21:25.290101 1964718 cni.go:143] "docker" driver + "containerd" runtime found, recommending kindnet
I0314 00:21:25.292854 1964718 out.go:177] * Configuring CNI (Container Networking Interface) ...
I0314 00:21:25.295125 1964718 ssh_runner.go:195] Run: stat /opt/cni/bin/portmap
I0314 00:21:25.299678 1964718 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.28.4/kubectl ...
I0314 00:21:25.299697 1964718 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (2438 bytes)
I0314 00:21:25.318981 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml
I0314 00:21:26.340677 1964718 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.28.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.021658137s)
I0314 00:21:26.340716 1964718 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0314 00:21:26.340833 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:26.340925 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-122411 minikube.k8s.io/updated_at=2024_03_14T00_21_26_0700 minikube.k8s.io/version=v1.32.0 minikube.k8s.io/commit=eceebabcbdee8f7e371d6df61e2829908b6c6abe minikube.k8s.io/name=addons-122411 minikube.k8s.io/primary=true
I0314 00:21:26.375914 1964718 ops.go:34] apiserver oom_adj: -16
I0314 00:21:26.518772 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:27.019559 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:27.519438 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:28.019667 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:28.518931 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:29.018949 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:29.519743 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:30.020352 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:30.519777 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:31.019257 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:31.519468 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:32.018909 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:32.519638 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:33.019538 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:33.518963 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:34.019251 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:34.519188 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:35.019382 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:35.519700 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:36.018929 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:36.519071 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:37.018935 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:37.519662 1964718 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.28.4/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0314 00:21:37.607266 1964718 kubeadm.go:1106] duration metric: took 11.266477993s to wait for elevateKubeSystemPrivileges
W0314 00:21:37.607301 1964718 kubeadm.go:286] apiserver tunnel failed: apiserver port not set
I0314 00:21:37.607309 1964718 kubeadm.go:393] duration metric: took 27.155341393s to StartCluster
I0314 00:21:37.607324 1964718 settings.go:142] acquiring lock: {Name:mkb041dc79ae1947b27d39dd7ebbd3bd473ee07d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:37.607437 1964718 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/18375-1958430/kubeconfig
I0314 00:21:37.607829 1964718 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/18375-1958430/kubeconfig: {Name:mkdddca847fdd161b32ac7434f6b37d491dbdecd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0314 00:21:37.608508 1964718 start.go:234] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.28.4 ContainerRuntime:containerd ControlPlane:true Worker:true}
I0314 00:21:37.610920 1964718 out.go:177] * Verifying Kubernetes components...
I0314 00:21:37.608643 1964718 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0314 00:21:37.608824 1964718 config.go:182] Loaded profile config "addons-122411": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I0314 00:21:37.608833 1964718 addons.go:502] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volumesnapshots:true yakd:true]
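The toEnable map above mirrors what the addons CLI exposes for this profile; the same toggles can be flipped per profile from the host. A short sketch, assuming a minikube binary on PATH and using addon names taken from the map above:
  minikube -p addons-122411 addons list
  minikube -p addons-122411 addons enable metrics-server
  minikube -p addons-122411 addons disable inspektor-gadget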
I0314 00:21:37.612996 1964718 addons.go:69] Setting yakd=true in profile "addons-122411"
I0314 00:21:37.613023 1964718 addons.go:234] Setting addon yakd=true in "addons-122411"
I0314 00:21:37.613061 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.613587 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.613656 1964718 addons.go:69] Setting ingress=true in profile "addons-122411"
I0314 00:21:37.613687 1964718 addons.go:234] Setting addon ingress=true in "addons-122411"
I0314 00:21:37.613721 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.614104 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.614592 1964718 addons.go:69] Setting ingress-dns=true in profile "addons-122411"
I0314 00:21:37.614633 1964718 addons.go:234] Setting addon ingress-dns=true in "addons-122411"
I0314 00:21:37.614668 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.614724 1964718 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0314 00:21:37.614941 1964718 addons.go:69] Setting cloud-spanner=true in profile "addons-122411"
I0314 00:21:37.615010 1964718 addons.go:234] Setting addon cloud-spanner=true in "addons-122411"
I0314 00:21:37.615052 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.615173 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.615607 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.618290 1964718 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-122411"
I0314 00:21:37.618370 1964718 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-122411"
I0314 00:21:37.618403 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.618819 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.622967 1964718 addons.go:69] Setting default-storageclass=true in profile "addons-122411"
I0314 00:21:37.623019 1964718 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-122411"
I0314 00:21:37.623372 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.623597 1964718 addons.go:69] Setting inspektor-gadget=true in profile "addons-122411"
I0314 00:21:37.623678 1964718 addons.go:234] Setting addon inspektor-gadget=true in "addons-122411"
I0314 00:21:37.623749 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.627451 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.659735 1964718 addons.go:69] Setting gcp-auth=true in profile "addons-122411"
I0314 00:21:37.659858 1964718 mustload.go:65] Loading cluster: addons-122411
I0314 00:21:37.660079 1964718 config.go:182] Loaded profile config "addons-122411": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.28.4
I0314 00:21:37.663691 1964718 addons.go:69] Setting metrics-server=true in profile "addons-122411"
I0314 00:21:37.663866 1964718 addons.go:234] Setting addon metrics-server=true in "addons-122411"
I0314 00:21:37.663932 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.665172 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.672744 1964718 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.10.0
I0314 00:21:37.674995 1964718 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0
I0314 00:21:37.677125 1964718 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0
I0314 00:21:37.682188 1964718 addons.go:426] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0314 00:21:37.682213 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0314 00:21:37.682285 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:37.663759 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.702049 1964718 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.14
I0314 00:21:37.704332 1964718 addons.go:426] installing /etc/kubernetes/addons/deployment.yaml
I0314 00:21:37.704356 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0314 00:21:37.704424 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:37.714220 1964718 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-122411"
I0314 00:21:37.714275 1964718 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-122411"
I0314 00:21:37.714320 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.714793 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.727434 1964718 addons.go:69] Setting registry=true in profile "addons-122411"
I0314 00:21:37.727478 1964718 addons.go:234] Setting addon registry=true in "addons-122411"
I0314 00:21:37.727523 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.727962 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.738301 1964718 addons.go:69] Setting storage-provisioner=true in profile "addons-122411"
I0314 00:21:37.738343 1964718 addons.go:234] Setting addon storage-provisioner=true in "addons-122411"
I0314 00:21:37.738381 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.742378 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.760950 1964718 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-122411"
I0314 00:21:37.761056 1964718 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-122411"
I0314 00:21:37.761424 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.774259 1964718 addons.go:69] Setting volumesnapshots=true in profile "addons-122411"
I0314 00:21:37.774361 1964718 addons.go:234] Setting addon volumesnapshots=true in "addons-122411"
I0314 00:21:37.774433 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.796925 1964718 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.2
I0314 00:21:37.800444 1964718 addons.go:426] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0314 00:21:37.800512 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0314 00:21:37.800614 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:37.816799 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0314 00:21:37.814370 1964718 addons.go:234] Setting addon default-storageclass=true in "addons-122411"
I0314 00:21:37.775010 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.820744 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.826235 1964718 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.4
I0314 00:21:37.841845 1964718 addons.go:426] installing /etc/kubernetes/addons/yakd-ns.yaml
I0314 00:21:37.841921 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0314 00:21:37.842001 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:37.854293 1964718 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.0
I0314 00:21:37.863311 1964718 addons.go:426] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0314 00:21:37.863345 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0314 00:21:37.863426 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:37.860218 1964718 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.26.0
I0314 00:21:37.900830 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-namespace.yaml
I0314 00:21:37.900901 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0314 00:21:37.901007 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:37.908078 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:37.860230 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0314 00:21:37.860736 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:37.993637 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:37.995714 1964718 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.14.5
I0314 00:21:37.998088 1964718 addons.go:426] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0314 00:21:37.998153 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0314 00:21:37.998272 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.058336 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0314 00:21:38.055461 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.056323 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.060034 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.073817 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0314 00:21:38.077413 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0314 00:21:38.077002 1964718 out.go:177] - Using image docker.io/registry:2.8.3
I0314 00:21:38.077317 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0314 00:21:38.080983 1964718 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0314 00:21:38.082848 1964718 addons.go:426] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0314 00:21:38.082869 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0314 00:21:38.082940 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.090116 1964718 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.5
I0314 00:21:38.092020 1964718 addons.go:426] installing /etc/kubernetes/addons/registry-rc.yaml
I0314 00:21:38.092043 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (798 bytes)
I0314 00:21:38.092108 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.097295 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0314 00:21:38.097201 1964718 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-122411"
I0314 00:21:38.116011 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:38.116559 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:38.135086 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0314 00:21:38.134682 1964718 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0314 00:21:38.147733 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0314 00:21:38.147691 1964718 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0314 00:21:38.150086 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0314 00:21:38.150101 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0314 00:21:38.150155 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.168042 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.148261 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.211545 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.212808 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.222965 1964718 addons.go:426] installing /etc/kubernetes/addons/storageclass.yaml
I0314 00:21:38.231470 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0314 00:21:38.231559 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.233997 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.279535 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.309627 1964718 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0314 00:21:38.306730 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.315061 1964718 out.go:177] - Using image docker.io/busybox:stable
I0314 00:21:38.316876 1964718 addons.go:426] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0314 00:21:38.316895 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0314 00:21:38.316961 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:38.325150 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.335129 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.365040 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:38.511856 1964718 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0314 00:21:38.512035 1964718 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0314 00:21:38.611870 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0314 00:21:38.669031 1964718 addons.go:426] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0314 00:21:38.669062 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0314 00:21:38.680353 1964718 addons.go:426] installing /etc/kubernetes/addons/yakd-sa.yaml
I0314 00:21:38.680433 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0314 00:21:38.753205 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0314 00:21:38.761307 1964718 addons.go:426] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0314 00:21:38.761338 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0314 00:21:38.771369 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0314 00:21:38.849172 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0314 00:21:38.852120 1964718 addons.go:426] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0314 00:21:38.852156 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0314 00:21:38.879955 1964718 addons.go:426] installing /etc/kubernetes/addons/registry-svc.yaml
I0314 00:21:38.879980 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0314 00:21:38.888918 1964718 addons.go:426] installing /etc/kubernetes/addons/yakd-crb.yaml
I0314 00:21:38.888947 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0314 00:21:38.890143 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0314 00:21:38.890162 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0314 00:21:38.914447 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0314 00:21:38.924225 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0314 00:21:38.968177 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0314 00:21:38.968241 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0314 00:21:38.971429 1964718 addons.go:426] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0314 00:21:38.971491 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0314 00:21:39.017281 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0314 00:21:39.025659 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-role.yaml
I0314 00:21:39.025737 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0314 00:21:39.088747 1964718 addons.go:426] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0314 00:21:39.088823 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0314 00:21:39.094028 1964718 addons.go:426] installing /etc/kubernetes/addons/yakd-svc.yaml
I0314 00:21:39.094104 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0314 00:21:39.134295 1964718 addons.go:426] installing /etc/kubernetes/addons/registry-proxy.yaml
I0314 00:21:39.134370 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0314 00:21:39.196850 1964718 addons.go:426] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0314 00:21:39.196925 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0314 00:21:39.213156 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0314 00:21:39.213432 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0314 00:21:39.239100 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0314 00:21:39.239164 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0314 00:21:39.284104 1964718 addons.go:426] installing /etc/kubernetes/addons/yakd-dp.yaml
I0314 00:21:39.284166 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0314 00:21:39.293521 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0314 00:21:39.301463 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0314 00:21:39.374138 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0314 00:21:39.374218 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0314 00:21:39.382536 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0314 00:21:39.382605 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0314 00:21:39.452308 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0314 00:21:39.452375 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0314 00:21:39.683553 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0314 00:21:39.874258 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0314 00:21:39.874329 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0314 00:21:39.878128 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0314 00:21:39.878191 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0314 00:21:39.881611 1964718 addons.go:426] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0314 00:21:39.881676 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0314 00:21:40.182375 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0314 00:21:40.234620 1964718 addons.go:426] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0314 00:21:40.234697 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0314 00:21:40.244740 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-crd.yaml
I0314 00:21:40.244816 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0314 00:21:40.453864 1964718 addons.go:426] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0314 00:21:40.453933 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0314 00:21:40.486350 1964718 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0314 00:21:40.486426 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0314 00:21:40.546975 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0314 00:21:40.702584 1964718 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0314 00:21:40.702648 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0314 00:21:41.114155 1964718 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0314 00:21:41.114229 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0314 00:21:41.403721 1964718 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0314 00:21:41.403782 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0314 00:21:41.610565 1964718 addons.go:426] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0314 00:21:41.610636 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0314 00:21:41.620818 1964718 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (3.108739391s)
I0314 00:21:41.621755 1964718 node_ready.go:35] waiting up to 6m0s for node "addons-122411" to be "Ready" ...
I0314 00:21:41.621841 1964718 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.28.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (3.109907267s)
I0314 00:21:41.621958 1964718 start.go:948] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
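(The bash pipeline that just completed after 3.1s rewrites the coredns ConfigMap in place: sed splices a hosts block mapping 192.168.49.1 to host.minikube.internal, with fallthrough, ahead of the forward directive and inserts a log directive before errors, then kubectl replace pushes the edited Corefile back. Assuming the same context, the injected record can be confirmed with:

    kubectl --context addons-122411 -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
    # the Corefile should now contain: hosts { 192.168.49.1 host.minikube.internal ... fallthrough }
)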
I0314 00:21:41.631490 1964718 node_ready.go:49] node "addons-122411" has status "Ready":"True"
I0314 00:21:41.631565 1964718 node_ready.go:38] duration metric: took 9.674573ms for node "addons-122411" to be "Ready" ...
I0314 00:21:41.631589 1964718 pod_ready.go:35] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0314 00:21:41.641725 1964718 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5dd5756b68-9528m" in "kube-system" namespace to be "Ready" ...
I0314 00:21:42.007994 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0314 00:21:42.126144 1964718 kapi.go:248] "coredns" deployment in "kube-system" namespace and "addons-122411" context rescaled to 1 replicas
I0314 00:21:42.644415 1964718 pod_ready.go:97] error getting pod "coredns-5dd5756b68-9528m" in "kube-system" namespace (skipping!): pods "coredns-5dd5756b68-9528m" not found
I0314 00:21:42.644444 1964718 pod_ready.go:81] duration metric: took 1.002642471s for pod "coredns-5dd5756b68-9528m" in "kube-system" namespace to be "Ready" ...
E0314 00:21:42.644456 1964718 pod_ready.go:66] WaitExtra: waitPodCondition: error getting pod "coredns-5dd5756b68-9528m" in "kube-system" namespace (skipping!): pods "coredns-5dd5756b68-9528m" not found
I0314 00:21:42.644464 1964718 pod_ready.go:78] waiting up to 6m0s for pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace to be "Ready" ...
I0314 00:21:44.713259 1964718 pod_ready.go:102] pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace has status "Ready":"False"
I0314 00:21:44.868468 1964718 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0314 00:21:44.868574 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:44.910978 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:45.530151 1964718 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0314 00:21:45.682730 1964718 addons.go:234] Setting addon gcp-auth=true in "addons-122411"
I0314 00:21:45.682832 1964718 host.go:66] Checking if "addons-122411" exists ...
I0314 00:21:45.683368 1964718 cli_runner.go:164] Run: docker container inspect addons-122411 --format={{.State.Status}}
I0314 00:21:45.707122 1964718 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0314 00:21:45.707181 1964718 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-122411
I0314 00:21:45.732796 1964718 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:35041 SSHKeyPath:/home/jenkins/minikube-integration/18375-1958430/.minikube/machines/addons-122411/id_rsa Username:docker}
I0314 00:21:46.585866 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (7.9739045s)
I0314 00:21:46.585958 1964718 addons.go:470] Verifying addon ingress=true in "addons-122411"
I0314 00:21:46.588791 1964718 out.go:177] * Verifying ingress addon...
I0314 00:21:46.586234 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (7.832939028s)
I0314 00:21:46.586265 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (7.814866639s)
I0314 00:21:46.586286 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (7.737086902s)
I0314 00:21:46.586340 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (7.671815349s)
I0314 00:21:46.586366 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (7.662082881s)
I0314 00:21:46.586455 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (7.292858486s)
I0314 00:21:46.586512 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (7.284974602s)
I0314 00:21:46.586541 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (6.902915724s)
I0314 00:21:46.586619 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (6.40416971s)
I0314 00:21:46.586660 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (6.039401646s)
I0314 00:21:46.586677 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (7.569047562s)
I0314 00:21:46.591525 1964718 addons.go:470] Verifying addon registry=true in "addons-122411"
I0314 00:21:46.593920 1964718 out.go:177] * Verifying registry addon...
I0314 00:21:46.591974 1964718 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0314 00:21:46.592023 1964718 addons.go:470] Verifying addon metrics-server=true in "addons-122411"
W0314 00:21:46.592044 1964718 addons.go:452] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0314 00:21:46.597472 1964718 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0314 00:21:46.598732 1964718 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-122411 service yakd-dashboard -n yakd-dashboard
I0314 00:21:46.598843 1964718 retry.go:31] will retry after 208.901216ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0314 00:21:46.608579 1964718 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0314 00:21:46.608603 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:46.609195 1964718 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0314 00:21:46.609206 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
W0314 00:21:46.615484 1964718 out.go:239] ! Enabling 'storage-provisioner-rancher' returned an error: running callbacks: [Error making local-path the default storage class: Error while marking storage class local-path as default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
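(The warning above is a routine optimistic-concurrency conflict: the storage-provisioner-rancher callback tried to mark the local-path StorageClass as the cluster default while another writer had just updated the object, so the update was rejected and must be retried against the latest version. A manual sketch of the same change, assuming the standard default-class annotation, would be:

    kubectl --context addons-122411 patch storageclass local-path \
      -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
)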
I0314 00:21:46.811325 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
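(The earlier apply failed because the VolumeSnapshotClass object was submitted in the same batch as the CRDs that define it, before the API server had registered snapshot.storage.k8s.io/v1; the retry above reapplies the same files with --force once the CRDs exist. Outside of this built-in retry, the usual guard is to apply the CRDs first and wait for them to be established, for example:

    kubectl apply -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
    kubectl wait --for condition=established crd/volumesnapshotclasses.snapshot.storage.k8s.io --timeout=60s
    # only after this is it safe to create VolumeSnapshotClass objects
)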
I0314 00:21:47.115176 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:47.116119 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:47.162724 1964718 pod_ready.go:102] pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace has status "Ready":"False"
I0314 00:21:47.616979 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:47.617826 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:47.977792 1964718 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (2.27063935s)
I0314 00:21:47.980913 1964718 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0
I0314 00:21:47.978048 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (5.969953436s)
I0314 00:21:47.981069 1964718 addons.go:470] Verifying addon csi-hostpath-driver=true in "addons-122411"
I0314 00:21:47.983319 1964718 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.1
I0314 00:21:47.986520 1964718 addons.go:426] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0314 00:21:47.986544 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0314 00:21:47.986449 1964718 out.go:177] * Verifying csi-hostpath-driver addon...
I0314 00:21:47.990265 1964718 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0314 00:21:48.010920 1964718 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0314 00:21:48.010944 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:48.037646 1964718 addons.go:426] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0314 00:21:48.037722 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0314 00:21:48.097928 1964718 addons.go:426] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0314 00:21:48.098001 1964718 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0314 00:21:48.108412 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:48.111510 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:48.179283 1964718 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0314 00:21:48.498259 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:48.606767 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:48.608047 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:48.923245 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.111847183s)
I0314 00:21:48.997776 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:49.106542 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:49.107746 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:49.420412 1964718 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.28.4/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.241042139s)
I0314 00:21:49.423249 1964718 addons.go:470] Verifying addon gcp-auth=true in "addons-122411"
I0314 00:21:49.425715 1964718 out.go:177] * Verifying gcp-auth addon...
I0314 00:21:49.428342 1964718 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0314 00:21:49.445995 1964718 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0314 00:21:49.446016 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:49.496916 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:49.603518 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:49.604923 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:49.651939 1964718 pod_ready.go:102] pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace has status "Ready":"False"
I0314 00:21:49.932559 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:49.996611 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:50.106452 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:50.107129 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:50.432831 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:50.498135 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:50.605119 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:50.605984 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:50.932415 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:51.001255 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:51.102990 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:51.104424 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:51.432865 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:51.497289 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:51.609339 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:51.611920 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:51.652822 1964718 pod_ready.go:102] pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace has status "Ready":"False"
I0314 00:21:51.933410 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:52.008106 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:52.108656 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:52.114019 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:52.435935 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:52.498794 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:52.606228 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:52.606641 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:52.932872 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:53.002595 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:53.112019 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:53.112224 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:53.433080 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:53.498731 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:53.607359 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:53.624991 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:53.652595 1964718 pod_ready.go:92] pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace has status "Ready":"True"
I0314 00:21:53.652636 1964718 pod_ready.go:81] duration metric: took 11.00816414s for pod "coredns-5dd5756b68-zfmbr" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.652649 1964718 pod_ready.go:78] waiting up to 6m0s for pod "etcd-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.662987 1964718 pod_ready.go:92] pod "etcd-addons-122411" in "kube-system" namespace has status "Ready":"True"
I0314 00:21:53.663017 1964718 pod_ready.go:81] duration metric: took 10.356275ms for pod "etcd-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.663034 1964718 pod_ready.go:78] waiting up to 6m0s for pod "kube-apiserver-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.669353 1964718 pod_ready.go:92] pod "kube-apiserver-addons-122411" in "kube-system" namespace has status "Ready":"True"
I0314 00:21:53.669392 1964718 pod_ready.go:81] duration metric: took 6.341374ms for pod "kube-apiserver-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.669405 1964718 pod_ready.go:78] waiting up to 6m0s for pod "kube-controller-manager-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.684480 1964718 pod_ready.go:92] pod "kube-controller-manager-addons-122411" in "kube-system" namespace has status "Ready":"True"
I0314 00:21:53.684508 1964718 pod_ready.go:81] duration metric: took 15.096028ms for pod "kube-controller-manager-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.684521 1964718 pod_ready.go:78] waiting up to 6m0s for pod "kube-proxy-l8qg6" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.694920 1964718 pod_ready.go:92] pod "kube-proxy-l8qg6" in "kube-system" namespace has status "Ready":"True"
I0314 00:21:53.694944 1964718 pod_ready.go:81] duration metric: took 10.415623ms for pod "kube-proxy-l8qg6" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.694955 1964718 pod_ready.go:78] waiting up to 6m0s for pod "kube-scheduler-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:53.933265 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:53.997523 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:54.050762 1964718 pod_ready.go:92] pod "kube-scheduler-addons-122411" in "kube-system" namespace has status "Ready":"True"
I0314 00:21:54.050796 1964718 pod_ready.go:81] duration metric: took 355.831579ms for pod "kube-scheduler-addons-122411" in "kube-system" namespace to be "Ready" ...
I0314 00:21:54.050811 1964718 pod_ready.go:78] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace to be "Ready" ...
I0314 00:21:54.105597 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:54.106806 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:54.433284 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:54.496487 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:54.603344 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:54.605818 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:54.932745 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:54.995882 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:55.105702 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:55.108204 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:55.432739 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:55.497271 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:55.605761 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:55.606917 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:55.935847 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:55.997244 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:56.058395 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:21:56.109254 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:56.110119 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:56.431876 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:56.496837 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:56.604491 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:56.605399 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:56.934265 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:56.997460 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:57.106219 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:57.106491 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:57.434978 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:57.497700 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:57.605387 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:57.607996 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:57.933149 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:57.996979 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:58.102468 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:58.105576 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:58.432344 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:58.495950 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:58.557136 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:21:58.604204 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:58.614231 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:58.933302 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:58.996098 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:59.105441 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:59.105904 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:59.434671 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:59.496434 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:21:59.602206 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:21:59.604190 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:21:59.932462 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:21:59.996776 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:00.110706 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:00.125318 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:22:00.433023 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:00.498549 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:00.559100 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:00.602179 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:00.604987 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:22:00.935101 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:00.997290 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:01.104364 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:01.104958 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:22:01.431968 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:01.496237 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:01.604793 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:22:01.607148 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:01.931932 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:01.997823 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:02.104383 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:02.107891 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:22:02.432864 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:02.496242 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:02.604417 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:02.605073 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0314 00:22:02.934506 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:02.997093 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:03.062496 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:03.104641 1964718 kapi.go:107] duration metric: took 16.507168622s to wait for kubernetes.io/minikube-addons=registry ...
I0314 00:22:03.104910 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:03.440636 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:03.497192 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:03.605555 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:03.935891 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:03.996579 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:04.103625 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:04.432559 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:04.497354 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:04.602873 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:04.933101 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:05.006519 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:05.102532 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:05.432617 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:05.502643 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:05.559183 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:05.603390 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:05.932459 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:05.996606 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:06.103828 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:06.436449 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:06.496714 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:06.602601 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:06.933302 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:06.996573 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:07.102126 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:07.431949 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:07.498815 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:07.604409 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:07.934902 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:08.003123 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:08.072681 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:08.102109 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:08.434118 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:08.501174 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:08.602770 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:08.932951 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:08.996580 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:09.102821 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:09.434768 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:09.497037 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:09.603117 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:09.932256 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:09.997293 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:10.104520 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:10.432281 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:10.495812 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:10.557367 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:10.602198 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:10.932148 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:10.996710 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:11.103588 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:11.432706 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:11.499648 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:11.606653 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:11.932862 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:12.010556 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:12.120071 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:12.431761 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:12.501820 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:12.557605 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:12.605844 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:12.933438 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:12.997968 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:13.102461 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:13.432939 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:13.495873 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:13.602563 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:13.932937 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:13.997076 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:14.102334 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:14.432362 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:14.498079 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:14.602235 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:14.932287 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:14.995657 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:15.059592 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:15.102901 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:15.432968 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:15.496922 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:15.608173 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:15.935000 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:15.997865 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:16.102842 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:16.432566 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:16.495820 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:16.603750 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:16.932715 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:16.996140 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:17.059942 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:17.102858 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:17.432937 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:17.495794 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:17.602301 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:17.932439 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:17.996552 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:18.102675 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:18.432788 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:18.496648 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:18.602116 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:18.932888 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:19.008597 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:19.103305 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:19.432277 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:19.496166 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:19.558341 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:19.602848 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:19.933188 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:19.996560 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:20.103192 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:20.432938 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:20.496549 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:20.602507 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:20.932794 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:20.995535 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:21.103294 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:21.432781 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:21.496355 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:21.560843 1964718 pod_ready.go:102] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"False"
I0314 00:22:21.602173 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:21.939090 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:21.998640 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:22.104520 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:22.432986 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:22.496352 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:22.602728 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:22.932976 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:22.996425 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:23.103652 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:23.432765 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:23.496970 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:23.602914 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:23.933068 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:23.998170 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:24.060383 1964718 pod_ready.go:92] pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace has status "Ready":"True"
I0314 00:22:24.060411 1964718 pod_ready.go:81] duration metric: took 30.009590889s for pod "nvidia-device-plugin-daemonset-98jl7" in "kube-system" namespace to be "Ready" ...
I0314 00:22:24.060421 1964718 pod_ready.go:38] duration metric: took 42.42880696s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0314 00:22:24.060436 1964718 api_server.go:52] waiting for apiserver process to appear ...
I0314 00:22:24.060500 1964718 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0314 00:22:24.079313 1964718 api_server.go:72] duration metric: took 46.470759052s to wait for apiserver process to appear ...
I0314 00:22:24.079342 1964718 api_server.go:88] waiting for apiserver healthz status ...
I0314 00:22:24.079396 1964718 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0314 00:22:24.088637 1964718 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0314 00:22:24.090211 1964718 api_server.go:141] control plane version: v1.28.4
I0314 00:22:24.090247 1964718 api_server.go:131] duration metric: took 10.896489ms to wait for apiserver health ...
I0314 00:22:24.090259 1964718 system_pods.go:43] waiting for kube-system pods to appear ...
I0314 00:22:24.100919 1964718 system_pods.go:59] 18 kube-system pods found
I0314 00:22:24.100959 1964718 system_pods.go:61] "coredns-5dd5756b68-zfmbr" [2549e865-f13d-45db-bd8c-a463d0ede910] Running
I0314 00:22:24.100966 1964718 system_pods.go:61] "csi-hostpath-attacher-0" [75f3d193-77c8-4f59-ba26-435c0f9de530] Running
I0314 00:22:24.100971 1964718 system_pods.go:61] "csi-hostpath-resizer-0" [e6388b1f-413f-4779-ba10-727304b97804] Running
I0314 00:22:24.100979 1964718 system_pods.go:61] "csi-hostpathplugin-tvkjl" [8990c7d4-be70-4eb4-98b8-725f79750022] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0314 00:22:24.100985 1964718 system_pods.go:61] "etcd-addons-122411" [623dde03-3da0-4fad-b33e-2498c50c1685] Running
I0314 00:22:24.100991 1964718 system_pods.go:61] "kindnet-84kzz" [564bf94d-cd62-43fb-9f84-9976346805b2] Running
I0314 00:22:24.100995 1964718 system_pods.go:61] "kube-apiserver-addons-122411" [e6842e57-b8af-481a-89f1-c237561993cb] Running
I0314 00:22:24.101000 1964718 system_pods.go:61] "kube-controller-manager-addons-122411" [08756eb3-4411-4d66-b998-552fd2d71228] Running
I0314 00:22:24.101011 1964718 system_pods.go:61] "kube-ingress-dns-minikube" [c3eb37ea-3bfe-44e7-9383-d812dede7b99] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0314 00:22:24.101022 1964718 system_pods.go:61] "kube-proxy-l8qg6" [41ad03f4-7ab5-4995-9c6d-18b18abf5c70] Running
I0314 00:22:24.101027 1964718 system_pods.go:61] "kube-scheduler-addons-122411" [d100c13d-299b-4a39-8b49-f777bf7480bb] Running
I0314 00:22:24.101031 1964718 system_pods.go:61] "metrics-server-69cf46c98-5qtdh" [fe7998ae-210d-4e08-81f9-3e7f19032943] Running
I0314 00:22:24.101037 1964718 system_pods.go:61] "nvidia-device-plugin-daemonset-98jl7" [3e3aa7cb-5082-4ad3-bd32-fb855ec98c06] Running
I0314 00:22:24.101047 1964718 system_pods.go:61] "registry-k7jx6" [aaa61793-7482-468e-9a48-807a12f2eae9] Running
I0314 00:22:24.101053 1964718 system_pods.go:61] "registry-proxy-ksf2h" [3c344254-4937-4c56-8655-cc99d0982dbf] Running
I0314 00:22:24.101057 1964718 system_pods.go:61] "snapshot-controller-58dbcc7b99-k5hps" [9909c6c9-0e31-4f6d-9c10-0b8da1c4208b] Running
I0314 00:22:24.101061 1964718 system_pods.go:61] "snapshot-controller-58dbcc7b99-s5lrs" [08f61656-83f2-49b2-8154-eaec3e1e35f0] Running
I0314 00:22:24.101073 1964718 system_pods.go:61] "storage-provisioner" [ae7009c0-b2f3-440b-a120-b008e703c335] Running
I0314 00:22:24.101080 1964718 system_pods.go:74] duration metric: took 10.814324ms to wait for pod list to return data ...
I0314 00:22:24.101089 1964718 default_sa.go:34] waiting for default service account to be created ...
I0314 00:22:24.103877 1964718 default_sa.go:45] found service account: "default"
I0314 00:22:24.103905 1964718 default_sa.go:55] duration metric: took 2.80742ms for default service account to be created ...
I0314 00:22:24.103915 1964718 system_pods.go:116] waiting for k8s-apps to be running ...
I0314 00:22:24.106693 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:24.114994 1964718 system_pods.go:86] 18 kube-system pods found
I0314 00:22:24.115036 1964718 system_pods.go:89] "coredns-5dd5756b68-zfmbr" [2549e865-f13d-45db-bd8c-a463d0ede910] Running
I0314 00:22:24.115044 1964718 system_pods.go:89] "csi-hostpath-attacher-0" [75f3d193-77c8-4f59-ba26-435c0f9de530] Running
I0314 00:22:24.115049 1964718 system_pods.go:89] "csi-hostpath-resizer-0" [e6388b1f-413f-4779-ba10-727304b97804] Running
I0314 00:22:24.115082 1964718 system_pods.go:89] "csi-hostpathplugin-tvkjl" [8990c7d4-be70-4eb4-98b8-725f79750022] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0314 00:22:24.115095 1964718 system_pods.go:89] "etcd-addons-122411" [623dde03-3da0-4fad-b33e-2498c50c1685] Running
I0314 00:22:24.115102 1964718 system_pods.go:89] "kindnet-84kzz" [564bf94d-cd62-43fb-9f84-9976346805b2] Running
I0314 00:22:24.115106 1964718 system_pods.go:89] "kube-apiserver-addons-122411" [e6842e57-b8af-481a-89f1-c237561993cb] Running
I0314 00:22:24.115113 1964718 system_pods.go:89] "kube-controller-manager-addons-122411" [08756eb3-4411-4d66-b998-552fd2d71228] Running
I0314 00:22:24.115126 1964718 system_pods.go:89] "kube-ingress-dns-minikube" [c3eb37ea-3bfe-44e7-9383-d812dede7b99] Running / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0314 00:22:24.115130 1964718 system_pods.go:89] "kube-proxy-l8qg6" [41ad03f4-7ab5-4995-9c6d-18b18abf5c70] Running
I0314 00:22:24.115135 1964718 system_pods.go:89] "kube-scheduler-addons-122411" [d100c13d-299b-4a39-8b49-f777bf7480bb] Running
I0314 00:22:24.115171 1964718 system_pods.go:89] "metrics-server-69cf46c98-5qtdh" [fe7998ae-210d-4e08-81f9-3e7f19032943] Running
I0314 00:22:24.115183 1964718 system_pods.go:89] "nvidia-device-plugin-daemonset-98jl7" [3e3aa7cb-5082-4ad3-bd32-fb855ec98c06] Running
I0314 00:22:24.115188 1964718 system_pods.go:89] "registry-k7jx6" [aaa61793-7482-468e-9a48-807a12f2eae9] Running
I0314 00:22:24.115192 1964718 system_pods.go:89] "registry-proxy-ksf2h" [3c344254-4937-4c56-8655-cc99d0982dbf] Running
I0314 00:22:24.115217 1964718 system_pods.go:89] "snapshot-controller-58dbcc7b99-k5hps" [9909c6c9-0e31-4f6d-9c10-0b8da1c4208b] Running
I0314 00:22:24.115228 1964718 system_pods.go:89] "snapshot-controller-58dbcc7b99-s5lrs" [08f61656-83f2-49b2-8154-eaec3e1e35f0] Running
I0314 00:22:24.115233 1964718 system_pods.go:89] "storage-provisioner" [ae7009c0-b2f3-440b-a120-b008e703c335] Running
I0314 00:22:24.115250 1964718 system_pods.go:126] duration metric: took 11.319896ms to wait for k8s-apps to be running ...
I0314 00:22:24.115266 1964718 system_svc.go:44] waiting for kubelet service to be running ....
I0314 00:22:24.115339 1964718 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0314 00:22:24.128488 1964718 system_svc.go:56] duration metric: took 13.213107ms WaitForService to wait for kubelet
I0314 00:22:24.128518 1964718 kubeadm.go:576] duration metric: took 46.519971796s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0314 00:22:24.128539 1964718 node_conditions.go:102] verifying NodePressure condition ...
I0314 00:22:24.133042 1964718 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0314 00:22:24.133078 1964718 node_conditions.go:123] node cpu capacity is 2
I0314 00:22:24.133091 1964718 node_conditions.go:105] duration metric: took 4.546369ms to run NodePressure ...
I0314 00:22:24.133104 1964718 start.go:240] waiting for startup goroutines ...
I0314 00:22:24.432927 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:24.497158 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:24.602729 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:24.932706 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:24.996853 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:25.103240 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:25.433916 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:25.497874 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:25.602404 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:25.934524 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:25.997035 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:26.102907 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:26.436214 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:26.496509 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:26.602742 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:26.933614 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:26.997820 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:27.103100 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:27.432687 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:27.496952 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:27.602532 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:27.932890 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:27.996797 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:28.103341 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:28.432434 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:28.496821 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:28.602525 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:28.933370 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:28.996507 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:29.102464 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:29.432624 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:29.496614 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:29.602856 1964718 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0314 00:22:29.934775 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:29.997409 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:30.105484 1964718 kapi.go:107] duration metric: took 43.513505491s to wait for app.kubernetes.io/name=ingress-nginx ...
I0314 00:22:30.432898 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:30.496704 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:30.933574 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:30.997265 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:31.432822 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:31.496580 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:31.934298 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:31.995662 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:32.432470 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0314 00:22:32.495834 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:32.933070 1964718 kapi.go:107] duration metric: took 43.504726108s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0314 00:22:32.935180 1964718 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-122411 cluster.
I0314 00:22:32.937329 1964718 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0314 00:22:32.939888 1964718 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
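For reference, a minimal sketch of what those two hints look like in practice (the pod name and image below are placeholders, not taken from this run; the `gcp-auth-skip-secret` label and the `--refresh` re-enable come straight from the messages above):
kubectl --context addons-122411 apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: no-gcp-creds-demo            # hypothetical example pod
  labels:
    gcp-auth-skip-secret: "true"     # opts this pod out of the credential mount
spec:
  containers:
  - name: busybox
    image: busybox:1.36
    command: ["sleep", "3600"]
EOF
# Pods created before the addon was enabled pick up credentials only after
# being recreated or after re-enabling the addon with --refresh:
out/minikube-linux-arm64 -p addons-122411 addons enable gcp-auth --refresh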
I0314 00:22:32.996372 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:33.495854 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:33.998234 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:34.496669 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:34.995650 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:35.496681 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:35.996333 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:36.495694 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:37.003121 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:37.495538 1964718 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0314 00:22:37.996431 1964718 kapi.go:107] duration metric: took 50.006162528s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0314 00:22:38.000840 1964718 out.go:177] * Enabled addons: ingress-dns, nvidia-device-plugin, cloud-spanner, storage-provisioner, inspektor-gadget, metrics-server, yakd, default-storageclass, volumesnapshots, registry, ingress, gcp-auth, csi-hostpath-driver
I0314 00:22:38.002754 1964718 addons.go:505] duration metric: took 1m0.393893567s for enable addons: enabled=[ingress-dns nvidia-device-plugin cloud-spanner storage-provisioner inspektor-gadget metrics-server yakd default-storageclass volumesnapshots registry ingress gcp-auth csi-hostpath-driver]
I0314 00:22:38.002832 1964718 start.go:245] waiting for cluster config update ...
I0314 00:22:38.002856 1964718 start.go:254] writing updated cluster config ...
I0314 00:22:38.003318 1964718 ssh_runner.go:195] Run: rm -f paused
I0314 00:22:38.341418 1964718 start.go:600] kubectl: 1.29.2, cluster: 1.28.4 (minor skew: 1)
I0314 00:22:38.343557 1964718 out.go:177] * Done! kubectl is now configured to use "addons-122411" cluster and "default" namespace by default
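As a quick sanity check of that default (assuming kubectl is on PATH), the active context and namespace can be inspected with:
kubectl config current-context                                   # expected: addons-122411
kubectl config view --minify --output 'jsonpath={..namespace}'   # expected: default (empty output also means default)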
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
42811fc06c380 dd1b12fcb6097 8 seconds ago Exited hello-world-app 2 445e852e07026 hello-world-app-5d77478584-ktm7f
be7e5417828a8 be5e6f23a9904 34 seconds ago Running nginx 0 dc37f04ac7685 nginx
8f6aedeb20a63 ee6d597e62dc8 About a minute ago Exited csi-snapshotter 0 21d4baa31a6d3 csi-hostpathplugin-tvkjl
fefe2ff132232 642ded511e141 About a minute ago Exited csi-provisioner 0 21d4baa31a6d3 csi-hostpathplugin-tvkjl
95780c4d10a9e 922312104da8a About a minute ago Exited liveness-probe 0 21d4baa31a6d3 csi-hostpathplugin-tvkjl
a6a4c9e531ee9 08f6b2990811a About a minute ago Exited hostpath 0 21d4baa31a6d3 csi-hostpathplugin-tvkjl
99e901f3c5fb8 bafe72500920c About a minute ago Running gcp-auth 0 e20939c7f2b4b gcp-auth-5f6b4f85fd-86hlm
1c5c881a17d34 0107d56dbc0be About a minute ago Exited node-driver-registrar 0 21d4baa31a6d3 csi-hostpathplugin-tvkjl
be84f20a0ed63 c0cfb4ce73bda About a minute ago Running nvidia-device-plugin-ctr 0 4e6865a7694d8 nvidia-device-plugin-daemonset-98jl7
89172c0dbafb4 487fa743e1e22 About a minute ago Exited csi-resizer 0 c25d3fd5a1ad4 csi-hostpath-resizer-0
e5b57a2274e94 1461903ec4fe9 About a minute ago Exited csi-external-health-monitor-controller 0 21d4baa31a6d3 csi-hostpathplugin-tvkjl
f5b7287eb555c 9a80d518f102c About a minute ago Exited csi-attacher 0 1582b6e8c5198 csi-hostpath-attacher-0
0d1480a78a678 1a024e390dd05 About a minute ago Exited patch 0 69778a54da324 ingress-nginx-admission-patch-l4g59
80571bfc05cd1 1a024e390dd05 About a minute ago Exited create 0 b2dafd1304c4a ingress-nginx-admission-create-cqgx5
8fcf16cf0b609 4d1e5c3e97420 About a minute ago Running volume-snapshot-controller 0 526aa2706b714 snapshot-controller-58dbcc7b99-k5hps
78bf62845fe81 7ce2150c8929b About a minute ago Running local-path-provisioner 0 bb0667de329c4 local-path-provisioner-78b46b4d5c-8mmnx
e6813e18710ba 4d1e5c3e97420 About a minute ago Running volume-snapshot-controller 0 56dedca96e2c1 snapshot-controller-58dbcc7b99-s5lrs
afd6df3e13aa8 20e3f2db01e81 About a minute ago Running yakd 0 828008ef71f21 yakd-dashboard-9947fc6bf-7xh2x
5ecc0e7602972 97e04611ad434 About a minute ago Running coredns 0 f36271ff34e70 coredns-5dd5756b68-zfmbr
900f595fcc633 41340d5d57adb 2 minutes ago Running cloud-spanner-emulator 0 4fadc2659c6f1 cloud-spanner-emulator-6548d5df46-9pvwd
61ca94ba68816 ba04bb24b9575 2 minutes ago Running storage-provisioner 0 ba5ef7c66edfd storage-provisioner
fd41927d5b596 4740c1948d3fc 2 minutes ago Running kindnet-cni 0 d2856623ff829 kindnet-84kzz
7efda6344d4a8 3ca3ca488cf13 2 minutes ago Running kube-proxy 0 5e4e6a9aa966c kube-proxy-l8qg6
bedee29a8b768 05c284c929889 2 minutes ago Running kube-scheduler 0 5acc5af3027da kube-scheduler-addons-122411
e3a90640ceaa5 9961cbceaf234 2 minutes ago Running kube-controller-manager 0 27e5220ee5804 kube-controller-manager-addons-122411
4a5e785fd0a00 04b4c447bb9d4 2 minutes ago Running kube-apiserver 0 96da72f60ca02 kube-apiserver-addons-122411
6011bc5d47236 9cdd6470f48c8 2 minutes ago Running etcd 0 6c08783c36256 etcd-addons-122411
==> containerd <==
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.368878868Z" level=info msg="shim disconnected" id=1582b6e8c51982c8fc5e9ceb974816e9698f6efadd012b8a96b440d0a4db7e8e
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.368935097Z" level=warning msg="cleaning up after shim disconnected" id=1582b6e8c51982c8fc5e9ceb974816e9698f6efadd012b8a96b440d0a4db7e8e namespace=k8s.io
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.368945715Z" level=info msg="cleaning up dead shim"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369232785Z" level=info msg="StopPodSandbox for \"21d4baa31a6d329eab1ed1aea9ab5dd0b90553141a15930de90aa7d09cba6082\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369303365Z" level=info msg="Container to stop \"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369320809Z" level=info msg="Container to stop \"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369335103Z" level=info msg="Container to stop \"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369348846Z" level=info msg="Container to stop \"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369364624Z" level=info msg="Container to stop \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.369378114Z" level=info msg="Container to stop \"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be\" must be in running or unknown state, current state \"CONTAINER_EXITED\""
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.395579935Z" level=warning msg="cleanup warnings time=\"2024-03-14T00:23:49Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=8954 runtime=io.containerd.runc.v2\n"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.432265854Z" level=info msg="shim disconnected" id=c25d3fd5a1ad4024175bb6d6922efcd6a0d021190e774b411a852450127a9982
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.432591718Z" level=warning msg="cleaning up after shim disconnected" id=c25d3fd5a1ad4024175bb6d6922efcd6a0d021190e774b411a852450127a9982 namespace=k8s.io
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.432610229Z" level=info msg="cleaning up dead shim"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.435753196Z" level=info msg="shim disconnected" id=21d4baa31a6d329eab1ed1aea9ab5dd0b90553141a15930de90aa7d09cba6082
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.435818623Z" level=warning msg="cleaning up after shim disconnected" id=21d4baa31a6d329eab1ed1aea9ab5dd0b90553141a15930de90aa7d09cba6082 namespace=k8s.io
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.435830217Z" level=info msg="cleaning up dead shim"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.448259137Z" level=warning msg="cleanup warnings time=\"2024-03-14T00:23:49Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=9013 runtime=io.containerd.runc.v2\n"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.454255162Z" level=warning msg="cleanup warnings time=\"2024-03-14T00:23:49Z\" level=info msg=\"starting signal loop\" namespace=k8s.io pid=9018 runtime=io.containerd.runc.v2\n"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.456202511Z" level=info msg="TearDown network for sandbox \"1582b6e8c51982c8fc5e9ceb974816e9698f6efadd012b8a96b440d0a4db7e8e\" successfully"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.456247894Z" level=info msg="StopPodSandbox for \"1582b6e8c51982c8fc5e9ceb974816e9698f6efadd012b8a96b440d0a4db7e8e\" returns successfully"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.512160491Z" level=info msg="TearDown network for sandbox \"c25d3fd5a1ad4024175bb6d6922efcd6a0d021190e774b411a852450127a9982\" successfully"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.512213963Z" level=info msg="StopPodSandbox for \"c25d3fd5a1ad4024175bb6d6922efcd6a0d021190e774b411a852450127a9982\" returns successfully"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.537973294Z" level=info msg="TearDown network for sandbox \"21d4baa31a6d329eab1ed1aea9ab5dd0b90553141a15930de90aa7d09cba6082\" successfully"
Mar 14 00:23:49 addons-122411 containerd[767]: time="2024-03-14T00:23:49.538023960Z" level=info msg="StopPodSandbox for \"21d4baa31a6d329eab1ed1aea9ab5dd0b90553141a15930de90aa7d09cba6082\" returns successfully"
==> coredns [5ecc0e760297260d85e3dc04184a1a6386d9ab0f8dd1c7586ebf98d6d6cbf8ce] <==
[INFO] 10.244.0.19:43348 - 52215 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000149594s
[INFO] 10.244.0.19:43348 - 4900 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.009501425s
[INFO] 10.244.0.19:49661 - 37165 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.009487443s
[INFO] 10.244.0.19:43348 - 6641 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.01349177s
[INFO] 10.244.0.19:49661 - 47428 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.01379471s
[INFO] 10.244.0.19:49661 - 1726 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00018445s
[INFO] 10.244.0.19:43348 - 34644 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000057731s
[INFO] 10.244.0.19:57925 - 9916 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000087893s
[INFO] 10.244.0.19:50716 - 14509 "A IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000076487s
[INFO] 10.244.0.19:57925 - 50671 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000043962s
[INFO] 10.244.0.19:50716 - 59698 "AAAA IN hello-world-app.default.svc.cluster.local.ingress-nginx.svc.cluster.local. udp 91 false 512" NXDOMAIN qr,aa,rd 184 0.000061021s
[INFO] 10.244.0.19:57925 - 45775 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000077152s
[INFO] 10.244.0.19:50716 - 57642 "A IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000058001s
[INFO] 10.244.0.19:57925 - 35638 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.000040147s
[INFO] 10.244.0.19:50716 - 9934 "AAAA IN hello-world-app.default.svc.cluster.local.svc.cluster.local. udp 77 false 512" NXDOMAIN qr,aa,rd 170 0.00003689s
[INFO] 10.244.0.19:57925 - 58247 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000042995s
[INFO] 10.244.0.19:50716 - 58398 "A IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000041321s
[INFO] 10.244.0.19:57925 - 54818 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.0000405s
[INFO] 10.244.0.19:50716 - 33444 "AAAA IN hello-world-app.default.svc.cluster.local.cluster.local. udp 73 false 512" NXDOMAIN qr,aa,rd 166 0.000036505s
[INFO] 10.244.0.19:57925 - 19605 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001280721s
[INFO] 10.244.0.19:50716 - 8377 "A IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001275781s
[INFO] 10.244.0.19:57925 - 35710 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001079426s
[INFO] 10.244.0.19:50716 - 14841 "AAAA IN hello-world-app.default.svc.cluster.local.us-east-2.compute.internal. udp 86 false 512" NXDOMAIN qr,rd,ra 86 0.001064772s
[INFO] 10.244.0.19:57925 - 4722 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00005179s
[INFO] 10.244.0.19:50716 - 57692 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00003506s
==> describe nodes <==
Name: addons-122411
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-122411
kubernetes.io/os=linux
minikube.k8s.io/commit=eceebabcbdee8f7e371d6df61e2829908b6c6abe
minikube.k8s.io/name=addons-122411
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_03_14T00_21_26_0700
minikube.k8s.io/version=v1.32.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-122411
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Thu, 14 Mar 2024 00:21:22 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-122411
AcquireTime: <unset>
RenewTime: Thu, 14 Mar 2024 00:23:47 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Thu, 14 Mar 2024 00:23:28 +0000 Thu, 14 Mar 2024 00:21:19 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Thu, 14 Mar 2024 00:23:28 +0000 Thu, 14 Mar 2024 00:21:19 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Thu, 14 Mar 2024 00:23:28 +0000 Thu, 14 Mar 2024 00:21:19 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Thu, 14 Mar 2024 00:23:28 +0000 Thu, 14 Mar 2024 00:21:35 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-122411
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022500Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022500Ki
pods: 110
System Info:
Machine ID: 8c735ed4ae0740e7bb1b9548d57bee0f
System UUID: 79f50cbe-c3ed-4396-addd-8d47d04f9fde
Boot ID: ae603cd7-e506-4ea2-a0e0-984864774a93
Kernel Version: 5.15.0-1055-aws
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: containerd://1.6.28
Kubelet Version: v1.28.4
Kube-Proxy Version: v1.28.4
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (17 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default cloud-spanner-emulator-6548d5df46-9pvwd 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m9s
default hello-world-app-5d77478584-ktm7f 0 (0%) 0 (0%) 0 (0%) 0 (0%) 27s
default nginx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37s
gcp-auth gcp-auth-5f6b4f85fd-86hlm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m1s
kube-system coredns-5dd5756b68-zfmbr 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 2m12s
kube-system etcd-addons-122411 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 2m25s
kube-system kindnet-84kzz 100m (5%) 100m (5%) 50Mi (0%) 50Mi (0%) 2m13s
kube-system kube-apiserver-addons-122411 250m (12%) 0 (0%) 0 (0%) 0 (0%) 2m27s
kube-system kube-controller-manager-addons-122411 200m (10%) 0 (0%) 0 (0%) 0 (0%) 2m25s
kube-system kube-proxy-l8qg6 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m13s
kube-system kube-scheduler-addons-122411 100m (5%) 0 (0%) 0 (0%) 0 (0%) 2m27s
kube-system nvidia-device-plugin-daemonset-98jl7 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m10s
kube-system snapshot-controller-58dbcc7b99-k5hps 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m5s
kube-system snapshot-controller-58dbcc7b99-s5lrs 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m5s
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m7s
local-path-storage local-path-provisioner-78b46b4d5c-8mmnx 0 (0%) 0 (0%) 0 (0%) 0 (0%) 2m5s
yakd-dashboard yakd-dashboard-9947fc6bf-7xh2x 0 (0%) 0 (0%) 128Mi (1%) 256Mi (3%) 2m6s
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 100m (5%)
memory 348Mi (4%) 476Mi (6%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 2m10s kube-proxy
Normal Starting 2m25s kubelet Starting kubelet.
Normal NodeHasSufficientMemory 2m25s kubelet Node addons-122411 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 2m25s kubelet Node addons-122411 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 2m25s kubelet Node addons-122411 status is now: NodeHasSufficientPID
Normal NodeNotReady 2m25s kubelet Node addons-122411 status is now: NodeNotReady
Normal NodeAllocatableEnforced 2m25s kubelet Updated Node Allocatable limit across pods
Normal NodeReady 2m15s kubelet Node addons-122411 status is now: NodeReady
Normal RegisteredNode 2m13s node-controller Node addons-122411 event: Registered Node addons-122411 in Controller
==> dmesg <==
[ +0.001356] FS-Cache: O-key=[8] '13435c0100000000'
[ +0.000805] FS-Cache: N-cookie c=00000066 [p=0000005d fl=2 nc=0 na=1]
[ +0.001045] FS-Cache: N-cookie d=00000000dfa10ab5{9p.inode} n=00000000d8f48c7d
[ +0.001277] FS-Cache: N-key=[8] '13435c0100000000'
[ +0.002503] FS-Cache: Duplicate cookie detected
[ +0.000822] FS-Cache: O-cookie c=00000060 [p=0000005d fl=226 nc=0 na=1]
[ +0.001194] FS-Cache: O-cookie d=00000000dfa10ab5{9p.inode} n=00000000ab28f1bc
[ +0.001304] FS-Cache: O-key=[8] '13435c0100000000'
[ +0.000805] FS-Cache: N-cookie c=00000067 [p=0000005d fl=2 nc=0 na=1]
[ +0.000940] FS-Cache: N-cookie d=00000000dfa10ab5{9p.inode} n=0000000005bfc23d
[ +0.001296] FS-Cache: N-key=[8] '13435c0100000000'
[ +1.948127] FS-Cache: Duplicate cookie detected
[ +0.000743] FS-Cache: O-cookie c=0000005e [p=0000005d fl=226 nc=0 na=1]
[ +0.000996] FS-Cache: O-cookie d=00000000dfa10ab5{9p.inode} n=0000000047416d4f
[ +0.001160] FS-Cache: O-key=[8] '12435c0100000000'
[ +0.000711] FS-Cache: N-cookie c=00000069 [p=0000005d fl=2 nc=0 na=1]
[ +0.000981] FS-Cache: N-cookie d=00000000dfa10ab5{9p.inode} n=00000000ddb7d408
[ +0.001200] FS-Cache: N-key=[8] '12435c0100000000'
[ +0.277874] FS-Cache: Duplicate cookie detected
[ +0.000809] FS-Cache: O-cookie c=00000063 [p=0000005d fl=226 nc=0 na=1]
[ +0.000965] FS-Cache: O-cookie d=00000000dfa10ab5{9p.inode} n=000000008040a2ac
[ +0.001136] FS-Cache: O-key=[8] '18435c0100000000'
[ +0.000845] FS-Cache: N-cookie c=0000006a [p=0000005d fl=2 nc=0 na=1]
[ +0.000985] FS-Cache: N-cookie d=00000000dfa10ab5{9p.inode} n=0000000081a41d92
[ +0.001094] FS-Cache: N-key=[8] '18435c0100000000'
==> etcd [6011bc5d47236f232ce06b9abfd2a40cab18b43e83889bf618d2cea3add70cbe] <==
{"level":"info","ts":"2024-03-14T00:21:18.748692Z","caller":"embed/etcd.go:597","msg":"serving peer traffic","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-03-14T00:21:18.748706Z","caller":"embed/etcd.go:569","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-03-14T00:21:18.749434Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc switched to configuration voters=(12593026477526642892)"}
{"level":"info","ts":"2024-03-14T00:21:18.749522Z","caller":"membership/cluster.go:421","msg":"added member","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","added-peer-id":"aec36adc501070cc","added-peer-peer-urls":["https://192.168.49.2:2380"]}
{"level":"info","ts":"2024-03-14T00:21:18.749581Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap.db","max":5,"interval":"30s"}
{"level":"info","ts":"2024-03-14T00:21:18.749608Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/snap","suffix":"snap","max":5,"interval":"30s"}
{"level":"info","ts":"2024-03-14T00:21:18.749616Z","caller":"fileutil/purge.go:44","msg":"started to purge file","dir":"/var/lib/minikube/etcd/member/wal","suffix":"wal","max":5,"interval":"30s"}
{"level":"info","ts":"2024-03-14T00:21:19.315249Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2024-03-14T00:21:19.315361Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-03-14T00:21:19.315467Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-03-14T00:21:19.315591Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-03-14T00:21:19.315686Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-03-14T00:21:19.315776Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-03-14T00:21:19.315887Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-03-14T00:21:19.322244Z","caller":"etcdserver/server.go:2571","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-03-14T00:21:19.32684Z","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-122411 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-03-14T00:21:19.327135Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-03-14T00:21:19.328409Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-03-14T00:21:19.328694Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-03-14T00:21:19.330165Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-03-14T00:21:19.330286Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-03-14T00:21:19.338789Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-03-14T00:21:19.34258Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-03-14T00:21:19.342833Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-03-14T00:21:19.343241Z","caller":"etcdserver/server.go:2595","msg":"cluster version is updated","cluster-version":"3.5"}
==> gcp-auth [99e901f3c5fb8c2e2dfe0b7d4507105ab75a009b136b1b41e0327d620a7de5d3] <==
2024/03/14 00:22:31 GCP Auth Webhook started!
2024/03/14 00:22:49 Ready to marshal response ...
2024/03/14 00:22:49 Ready to write response ...
2024/03/14 00:23:13 Ready to marshal response ...
2024/03/14 00:23:13 Ready to write response ...
2024/03/14 00:23:16 Ready to marshal response ...
2024/03/14 00:23:16 Ready to write response ...
2024/03/14 00:23:23 Ready to marshal response ...
2024/03/14 00:23:23 Ready to write response ...
2024/03/14 00:23:38 Ready to marshal response ...
2024/03/14 00:23:38 Ready to write response ...
==> kernel <==
00:23:50 up 8:06, 0 users, load average: 2.68, 2.77, 2.73
Linux addons-122411 5.15.0-1055-aws #60~20.04.1-Ubuntu SMP Thu Feb 22 15:54:21 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kindnet [fd41927d5b5960f3600296f2a202c97121894b97996da03ecdffc82c6916cb4d] <==
I0314 00:21:42.011265 1 main.go:227] handling current node
I0314 00:21:52.037526 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:21:52.037560 1 main.go:227] handling current node
I0314 00:22:02.049411 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:22:02.049442 1 main.go:227] handling current node
I0314 00:22:12.064246 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:22:12.064287 1 main.go:227] handling current node
I0314 00:22:22.078123 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:22:22.078162 1 main.go:227] handling current node
I0314 00:22:32.090908 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:22:32.090938 1 main.go:227] handling current node
I0314 00:22:42.098457 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:22:42.098490 1 main.go:227] handling current node
I0314 00:22:52.110771 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:22:52.110800 1 main.go:227] handling current node
I0314 00:23:02.126577 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:23:02.126609 1 main.go:227] handling current node
I0314 00:23:12.136941 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:23:12.136978 1 main.go:227] handling current node
I0314 00:23:22.141711 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:23:22.141742 1 main.go:227] handling current node
I0314 00:23:32.152771 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:23:32.152801 1 main.go:227] handling current node
I0314 00:23:42.284383 1 main.go:223] Handling node with IPs: map[192.168.49.2:{}]
I0314 00:23:42.284415 1 main.go:227] handling current node
==> kube-apiserver [4a5e785fd0a00d963f357c98432c53e43acef2f961f3a29fc053f7ee1094d4ca] <==
W0314 00:21:47.174705 1 aggregator.go:166] failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
I0314 00:21:47.648506 1 alloc.go:330] "allocated clusterIPs" service="kube-system/csi-hostpath-attacher" clusterIPs={"IPv4":"10.98.197.71"}
I0314 00:21:47.674479 1 controller.go:624] quota admission added evaluator for: statefulsets.apps
I0314 00:21:47.895869 1 alloc.go:330] "allocated clusterIPs" service="kube-system/csi-hostpath-resizer" clusterIPs={"IPv4":"10.97.102.11"}
W0314 00:21:48.444121 1 aggregator.go:166] failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
I0314 00:21:49.234209 1 alloc.go:330] "allocated clusterIPs" service="gcp-auth/gcp-auth" clusterIPs={"IPv4":"10.105.118.115"}
E0314 00:22:03.321993 1 available_controller.go:460] v1beta1.metrics.k8s.io failed with: failing or missing response from https://10.105.40.108:443/apis/metrics.k8s.io/v1beta1: Get "https://10.105.40.108:443/apis/metrics.k8s.io/v1beta1": dial tcp 10.105.40.108:443: connect: connection refused
W0314 00:22:03.322335 1 handler_proxy.go:93] no RequestInfo found in the context
E0314 00:22:03.322394 1 controller.go:146] Error updating APIService "v1beta1.metrics.k8s.io" with err: failed to download v1beta1.metrics.k8s.io: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable
, Header: map[Content-Type:[text/plain; charset=utf-8] X-Content-Type-Options:[nosniff]]
E0314 00:22:03.322972 1 available_controller.go:460] v1beta1.metrics.k8s.io failed with: failing or missing response from https://10.105.40.108:443/apis/metrics.k8s.io/v1beta1: Get "https://10.105.40.108:443/apis/metrics.k8s.io/v1beta1": dial tcp 10.105.40.108:443: connect: connection refused
I0314 00:22:03.323320 1 handler.go:232] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
E0314 00:22:03.328943 1 available_controller.go:460] v1beta1.metrics.k8s.io failed with: failing or missing response from https://10.105.40.108:443/apis/metrics.k8s.io/v1beta1: Get "https://10.105.40.108:443/apis/metrics.k8s.io/v1beta1": dial tcp 10.105.40.108:443: connect: connection refused
I0314 00:22:03.424995 1 handler.go:232] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
I0314 00:22:21.774984 1 handler.go:232] Adding GroupVersion metrics.k8s.io v1beta1 to ResourceManager
I0314 00:23:04.331914 1 controller.go:129] OpenAPI AggregationController: action for item v1beta1.metrics.k8s.io: Nothing (removed from the queue).
I0314 00:23:07.747716 1 handler.go:232] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
I0314 00:23:07.760663 1 handler.go:232] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0314 00:23:08.778587 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0314 00:23:13.300543 1 controller.go:624] quota admission added evaluator for: ingresses.networking.k8s.io
I0314 00:23:13.626056 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.98.143.1"}
I0314 00:23:23.359653 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.108.80.200"}
I0314 00:23:28.030106 1 controller.go:624] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
==> kube-controller-manager [e3a90640ceaa5070a7e644a8c3ec86d0b11c16330a01f8b0a68bb54168056b8b] <==
E0314 00:23:17.993143 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0314 00:23:23.089518 1 event.go:307] "Event occurred" object="default/hello-world-app" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set hello-world-app-5d77478584 to 1"
I0314 00:23:23.110930 1 event.go:307] "Event occurred" object="default/hello-world-app-5d77478584" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: hello-world-app-5d77478584-ktm7f"
I0314 00:23:23.138812 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="48.899042ms"
I0314 00:23:23.169955 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="31.092096ms"
I0314 00:23:23.194806 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="24.798736ms"
I0314 00:23:23.195001 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="72.648µs"
W0314 00:23:26.001202 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0314 00:23:26.001243 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0314 00:23:26.124664 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="74.814µs"
I0314 00:23:27.149972 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="43.495µs"
I0314 00:23:28.173650 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="70.777µs"
I0314 00:23:30.576504 1 event.go:307] "Event occurred" object="default/hpvc-restore" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="ExternalProvisioning" message="Waiting for a volume to be created either by the external provisioner 'hostpath.csi.k8s.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered."
I0314 00:23:37.188492 1 event.go:307] "Event occurred" object="default/hpvc-restore" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="ExternalProvisioning" message="Waiting for a volume to be created either by the external provisioner 'hostpath.csi.k8s.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered."
I0314 00:23:37.419223 1 shared_informer.go:311] Waiting for caches to sync for resource quota
I0314 00:23:37.419269 1 shared_informer.go:318] Caches are synced for resource quota
I0314 00:23:38.157208 1 event.go:307] "Event occurred" object="default/hpvc-restore" fieldPath="" kind="PersistentVolumeClaim" apiVersion="v1" type="Normal" reason="ExternalProvisioning" message="Waiting for a volume to be created either by the external provisioner 'hostpath.csi.k8s.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered."
W0314 00:23:40.506800 1 reflector.go:535] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0314 00:23:40.506841 1 reflector.go:147] vendor/k8s.io/client-go/metadata/metadatainformer/informer.go:106: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
I0314 00:23:40.907079 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-76dc478dd8" duration="6.794µs"
I0314 00:23:40.907378 1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-create"
I0314 00:23:40.918280 1 job_controller.go:562] "enqueueing job" key="ingress-nginx/ingress-nginx-admission-patch"
I0314 00:23:42.210496 1 replica_set.go:676] "Finished syncing" kind="ReplicaSet" key="default/hello-world-app-5d77478584" duration="116.799µs"
I0314 00:23:48.901488 1 stateful_set.go:458] "StatefulSet has been deleted" key="kube-system/csi-hostpath-attacher"
I0314 00:23:48.988235 1 stateful_set.go:458] "StatefulSet has been deleted" key="kube-system/csi-hostpath-resizer"
==> kube-proxy [7efda6344d4a8449a9484d899dc41810be1633baf6a5a5b65e012e0d40e73a53] <==
I0314 00:21:39.889434 1 server_others.go:69] "Using iptables proxy"
I0314 00:21:39.916316 1 node.go:141] Successfully retrieved node IP: 192.168.49.2
I0314 00:21:39.959826 1 server.go:632] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0314 00:21:39.962443 1 server_others.go:152] "Using iptables Proxier"
I0314 00:21:39.962483 1 server_others.go:421] "Detect-local-mode set to ClusterCIDR, but no cluster CIDR for family" ipFamily="IPv6"
I0314 00:21:39.962499 1 server_others.go:438] "Defaulting to no-op detect-local"
I0314 00:21:39.962535 1 proxier.go:251] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses"
I0314 00:21:39.962751 1 server.go:846] "Version info" version="v1.28.4"
I0314 00:21:39.962765 1 server.go:848] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0314 00:21:39.971799 1 config.go:188] "Starting service config controller"
I0314 00:21:39.971845 1 shared_informer.go:311] Waiting for caches to sync for service config
I0314 00:21:39.971868 1 config.go:97] "Starting endpoint slice config controller"
I0314 00:21:39.971876 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
I0314 00:21:39.973627 1 config.go:315] "Starting node config controller"
I0314 00:21:39.973641 1 shared_informer.go:311] Waiting for caches to sync for node config
I0314 00:21:40.072205 1 shared_informer.go:318] Caches are synced for endpoint slice config
I0314 00:21:40.072275 1 shared_informer.go:318] Caches are synced for service config
I0314 00:21:40.075092 1 shared_informer.go:318] Caches are synced for node config
==> kube-scheduler [bedee29a8b76861ab9447473f45d0309be1f6d3838aaa2897b7c88305a29d3c8] <==
W0314 00:21:22.034246 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0314 00:21:22.034263 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
W0314 00:21:22.034333 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0314 00:21:22.034356 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
W0314 00:21:22.034413 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0314 00:21:22.034429 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0314 00:21:22.034589 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0314 00:21:22.034691 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
W0314 00:21:22.849944 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0314 00:21:22.850204 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
W0314 00:21:22.859619 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0314 00:21:22.859652 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
W0314 00:21:22.883173 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0314 00:21:22.883383 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
W0314 00:21:22.927842 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0314 00:21:22.928489 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
W0314 00:21:22.929541 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0314 00:21:22.929720 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
W0314 00:21:22.951107 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0314 00:21:22.951161 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0314 00:21:23.055327 1 reflector.go:535] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0314 00:21:23.055367 1 reflector.go:147] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
W0314 00:21:23.126197 1 reflector.go:535] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0314 00:21:23.126419 1 reflector.go:147] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
I0314 00:21:25.004455 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.408920 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0"} err="failed to get container status \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\": rpc error: code = NotFound desc = an error occurred when try to find container \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.408950 1492 scope.go:117] "RemoveContainer" containerID="8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.409286 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef"} err="failed to get container status \"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef\": rpc error: code = NotFound desc = an error occurred when try to find container \"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.409313 1492 scope.go:117] "RemoveContainer" containerID="fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.409709 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be"} err="failed to get container status \"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be\": rpc error: code = NotFound desc = an error occurred when try to find container \"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.409736 1492 scope.go:117] "RemoveContainer" containerID="95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.410080 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3"} err="failed to get container status \"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3\": rpc error: code = NotFound desc = an error occurred when try to find container \"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.410111 1492 scope.go:117] "RemoveContainer" containerID="a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.410442 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da"} err="failed to get container status \"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da\": rpc error: code = NotFound desc = an error occurred when try to find container \"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.410468 1492 scope.go:117] "RemoveContainer" containerID="1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.410907 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb"} err="failed to get container status \"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb\": rpc error: code = NotFound desc = an error occurred when try to find container \"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.410930 1492 scope.go:117] "RemoveContainer" containerID="e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.411326 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0"} err="failed to get container status \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\": rpc error: code = NotFound desc = an error occurred when try to find container \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.411354 1492 scope.go:117] "RemoveContainer" containerID="8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.411693 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef"} err="failed to get container status \"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef\": rpc error: code = NotFound desc = an error occurred when try to find container \"8f6aedeb20a63c51e31ebf66c078cb46b990f0c2e6ad8a1dbc8666f7080034ef\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.411719 1492 scope.go:117] "RemoveContainer" containerID="fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.412090 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be"} err="failed to get container status \"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be\": rpc error: code = NotFound desc = an error occurred when try to find container \"fefe2ff132232a17d5cb6a717cf2a3434a946a917d0b6fc059004b017cbbd7be\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.412120 1492 scope.go:117] "RemoveContainer" containerID="95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.412455 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3"} err="failed to get container status \"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3\": rpc error: code = NotFound desc = an error occurred when try to find container \"95780c4d10a9e477127ccb65b77fb2c87377e43999e418a0b04b4580c35ac5f3\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.412480 1492 scope.go:117] "RemoveContainer" containerID="a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.412807 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da"} err="failed to get container status \"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da\": rpc error: code = NotFound desc = an error occurred when try to find container \"a6a4c9e531ee911d190f485629532c8a7a726b22512f13ad9b94386320bb44da\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.412834 1492 scope.go:117] "RemoveContainer" containerID="1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.413211 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb"} err="failed to get container status \"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb\": rpc error: code = NotFound desc = an error occurred when try to find container \"1c5c881a17d345a78c4e70cd883551cedcc834fdf4e5ef82b90204e4de7591bb\": not found"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.413270 1492 scope.go:117] "RemoveContainer" containerID="e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0"
Mar 14 00:23:50 addons-122411 kubelet[1492]: I0314 00:23:50.413583 1492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0"} err="failed to get container status \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\": rpc error: code = NotFound desc = an error occurred when try to find container \"e5b57a2274e94c4fcb757fd326ce6dc630d6dd4ce269bcfde6c05b9095fc75b0\": not found"
==> storage-provisioner [61ca94ba6881646e2b5eb6bd1606199b57a96f2190cd6475bbaf33bc8c153b2c] <==
I0314 00:21:45.151246 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0314 00:21:45.250752 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0314 00:21:45.250810 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0314 00:21:45.348458 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0314 00:21:45.349268 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-122411_8c552f76-9839-4c14-914b-134ef230b25e!
I0314 00:21:45.357062 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"81f0a142-3e65-44d5-af86-bb2d2a0b6331", APIVersion:"v1", ResourceVersion:"624", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-122411_8c552f76-9839-4c14-914b-134ef230b25e became leader
I0314 00:21:45.450086 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-122411_8c552f76-9839-4c14-914b-134ef230b25e!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-122411 -n addons-122411
helpers_test.go:261: (dbg) Run: kubectl --context addons-122411 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:285: <<< TestAddons/parallel/Ingress FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Ingress (38.19s)