=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:328: registry stabilized in 2.755914ms
addons_test.go:330: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-lmt9d" [2a3a6aaa-b147-4517-bdc2-529c58ed2d26] Running
addons_test.go:330: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.003282306s
addons_test.go:333: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-2wp2r" [c8ba5e64-c35c-4fdb-8dfb-ede028619b44] Running
addons_test.go:333: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.004253868s
addons_test.go:338: (dbg) Run: kubectl --context addons-877987 delete po -l run=registry-test --now
addons_test.go:343: (dbg) Run: kubectl --context addons-877987 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:343: (dbg) Non-zero exit: kubectl --context addons-877987 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.150173785s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:345: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-877987 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:349: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:357: (dbg) Run: out/minikube-linux-arm64 -p addons-877987 ip
2024/09/20 16:57:32 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:386: (dbg) Run: out/minikube-linux-arm64 -p addons-877987 addons disable registry --alsologtostderr -v=1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-877987
helpers_test.go:235: (dbg) docker inspect addons-877987:
-- stdout --
[
{
"Id": "d95947c17606bacd3e570a9182c1de827df0aa9fe042efdee7f33d038aec9da4",
"Created": "2024-09-20T16:44:18.821839744Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 8822,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-20T16:44:18.997782345Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:c94982da1293baee77c00993711af197ed62d6b1a4ee12c0caa4f57c70de4fdc",
"ResolvConfPath": "/var/lib/docker/containers/d95947c17606bacd3e570a9182c1de827df0aa9fe042efdee7f33d038aec9da4/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/d95947c17606bacd3e570a9182c1de827df0aa9fe042efdee7f33d038aec9da4/hostname",
"HostsPath": "/var/lib/docker/containers/d95947c17606bacd3e570a9182c1de827df0aa9fe042efdee7f33d038aec9da4/hosts",
"LogPath": "/var/lib/docker/containers/d95947c17606bacd3e570a9182c1de827df0aa9fe042efdee7f33d038aec9da4/d95947c17606bacd3e570a9182c1de827df0aa9fe042efdee7f33d038aec9da4-json.log",
"Name": "/addons-877987",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-877987:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "addons-877987",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/b16cc1c7dd5c336a505a076203b74609a051734b5b94fb24e814acb100192f61-init/diff:/var/lib/docker/overlay2/fab76bcb726d0967c4800d6a9255781ccd228428269d4d62cbf53d43201c9aa2/diff",
"MergedDir": "/var/lib/docker/overlay2/b16cc1c7dd5c336a505a076203b74609a051734b5b94fb24e814acb100192f61/merged",
"UpperDir": "/var/lib/docker/overlay2/b16cc1c7dd5c336a505a076203b74609a051734b5b94fb24e814acb100192f61/diff",
"WorkDir": "/var/lib/docker/overlay2/b16cc1c7dd5c336a505a076203b74609a051734b5b94fb24e814acb100192f61/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-877987",
"Source": "/var/lib/docker/volumes/addons-877987/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-877987",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-877987",
"name.minikube.sigs.k8s.io": "addons-877987",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "1167d286f8d7d2f3677e33ba4c26630abd10800af77ecbd4046ce43bf4d768d7",
"SandboxKey": "/var/run/docker/netns/1167d286f8d7",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-877987": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "33c3cfe9a990b66d6cf2c66c50c72a77a1b8b28d83efd033fb06040528137544",
"EndpointID": "58759938cdc4a1718c0bfc0494fe277897bfe0ba90204b49befc1c8c67342641",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-877987",
"d95947c17606"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-arm64 status --format={{.Host}} -p addons-877987 -n addons-877987
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-arm64 -p addons-877987 logs -n 25
helpers_test.go:247: (dbg) Done: out/minikube-linux-arm64 -p addons-877987 logs -n 25: (1.159785398s)
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| start | -o=json --download-only | download-only-923497 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | |
| | -p download-only-923497 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.20.0 | | | | | |
| | --container-runtime=docker | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | --all | minikube | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| delete | -p download-only-923497 | download-only-923497 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| start | -o=json --download-only | download-only-777196 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | |
| | -p download-only-777196 | | | | | |
| | --force --alsologtostderr | | | | | |
| | --kubernetes-version=v1.31.1 | | | | | |
| | --container-runtime=docker | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | --all | minikube | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| delete | -p download-only-777196 | download-only-777196 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| delete | -p download-only-923497 | download-only-923497 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| delete | -p download-only-777196 | download-only-777196 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| start | --download-only -p | download-docker-108524 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | |
| | download-docker-108524 | | | | | |
| | --alsologtostderr | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p download-docker-108524 | download-docker-108524 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| start | --download-only -p | binary-mirror-528781 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | |
| | binary-mirror-528781 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:37459 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-528781 | binary-mirror-528781 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:43 UTC |
| addons | enable dashboard -p | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | |
| | addons-877987 | | | | | |
| addons | disable dashboard -p | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | |
| | addons-877987 | | | | | |
| start | -p addons-877987 --wait=true | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:43 UTC | 20 Sep 24 16:47 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| addons | addons-877987 addons disable | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:48 UTC | 20 Sep 24 16:48 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:56 UTC | 20 Sep 24 16:56 UTC |
| | -p addons-877987 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-877987 addons disable | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:56 UTC | 20 Sep 24 16:56 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-877987 addons | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:57 UTC | 20 Sep 24 16:57 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-877987 addons | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:57 UTC | 20 Sep 24 16:57 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-877987 addons | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:57 UTC | 20 Sep 24 16:57 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:57 UTC | |
| | addons-877987 | | | | | |
| ip | addons-877987 ip | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:57 UTC | 20 Sep 24 16:57 UTC |
| addons | addons-877987 addons disable | addons-877987 | jenkins | v1.34.0 | 20 Sep 24 16:57 UTC | 20 Sep 24 16:57 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|--------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/20 16:43:54
Running on machine: ip-172-31-30-239
Binary: Built with gc go1.23.0 for linux/arm64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0920 16:43:54.520087 8307 out.go:345] Setting OutFile to fd 1 ...
I0920 16:43:54.520275 8307 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0920 16:43:54.520287 8307 out.go:358] Setting ErrFile to fd 2...
I0920 16:43:54.520294 8307 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0920 16:43:54.520585 8307 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19672-2235/.minikube/bin
I0920 16:43:54.521076 8307 out.go:352] Setting JSON to false
I0920 16:43:54.521883 8307 start.go:129] hostinfo: {"hostname":"ip-172-31-30-239","uptime":1586,"bootTime":1726849049,"procs":146,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1070-aws","kernelArch":"aarch64","virtualizationSystem":"","virtualizationRole":"","hostId":"92f46a7d-c249-4c12-924a-77f64874c910"}
I0920 16:43:54.521954 8307 start.go:139] virtualization:
I0920 16:43:54.524687 8307 out.go:177] * [addons-877987] minikube v1.34.0 on Ubuntu 20.04 (arm64)
I0920 16:43:54.527977 8307 out.go:177] - MINIKUBE_LOCATION=19672
I0920 16:43:54.528137 8307 notify.go:220] Checking for updates...
I0920 16:43:54.532790 8307 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0920 16:43:54.535561 8307 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19672-2235/kubeconfig
I0920 16:43:54.538001 8307 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19672-2235/.minikube
I0920 16:43:54.540081 8307 out.go:177] - MINIKUBE_BIN=out/minikube-linux-arm64
I0920 16:43:54.542350 8307 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0920 16:43:54.545136 8307 driver.go:394] Setting default libvirt URI to qemu:///system
I0920 16:43:54.570396 8307 docker.go:123] docker version: linux-27.3.0:Docker Engine - Community
I0920 16:43:54.570511 8307 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0920 16:43:54.625536    8307 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:44 SystemTime:2024-09-20 16:43:54.615721842 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1070-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:27.3.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.6]] Warnings:<nil>}}
I0920 16:43:54.625650 8307 docker.go:318] overlay module found
I0920 16:43:54.627739 8307 out.go:177] * Using the docker driver based on user configuration
I0920 16:43:54.629899 8307 start.go:297] selected driver: docker
I0920 16:43:54.629930 8307 start.go:901] validating driver "docker" against <nil>
I0920 16:43:54.629944 8307 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0920 16:43:54.630697 8307 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0920 16:43:54.678980    8307 info.go:266] docker info: {ID:6ZPO:QZND:VNGE:LUKL:4Y3K:XELL:AAX4:2GTK:E6LM:MPRN:3ZXR:TTMR Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:29 OomKillDisable:true NGoroutines:44 SystemTime:2024-09-20 16:43:54.669597018 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1070-aws OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:aarch64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:2 MemTotal:8214831104 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ip-172-31-30-239 Labels:[] ExperimentalBuild:false ServerVersion:27.3.0 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:[WARNING: bridge-nf-call-iptables is disabled WARNING: bridge-nf-call-ip6tables is disabled] ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.17.1] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.6]] Warnings:<nil>}}
I0920 16:43:54.679177 8307 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0920 16:43:54.679392 8307 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0920 16:43:54.681875 8307 out.go:177] * Using Docker driver with root privileges
I0920 16:43:54.683841 8307 cni.go:84] Creating CNI manager for ""
I0920 16:43:54.683902 8307 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0920 16:43:54.683925 8307 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0920 16:43:54.684008 8307 start.go:340] cluster config:
{Name:addons-877987 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-877987 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0920 16:43:54.686474 8307 out.go:177] * Starting "addons-877987" primary control-plane node in "addons-877987" cluster
I0920 16:43:54.688549 8307 cache.go:121] Beginning downloading kic base image for docker with docker
I0920 16:43:54.690653 8307 out.go:177] * Pulling base image v0.0.45-1726784731-19672 ...
I0920 16:43:54.692800 8307 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0920 16:43:54.692845 8307 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19672-2235/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-arm64.tar.lz4
I0920 16:43:54.692858 8307 cache.go:56] Caching tarball of preloaded images
I0920 16:43:54.692857 8307 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local docker daemon
I0920 16:43:54.692936 8307 preload.go:172] Found /home/jenkins/minikube-integration/19672-2235/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-arm64.tar.lz4 in cache, skipping download
I0920 16:43:54.692946 8307 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0920 16:43:54.693282 8307 profile.go:143] Saving config to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/config.json ...
I0920 16:43:54.693310 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/config.json: {Name:mkf2e3ecc51cae16a5656830a7678f4e5142cf00 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:43:54.707977 8307 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed to local cache
I0920 16:43:54.708072 8307 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local cache directory
I0920 16:43:54.708090 8307 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed in local cache directory, skipping pull
I0920 16:43:54.708095 8307 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed exists in cache, skipping pull
I0920 16:43:54.708102 8307 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed as a tarball
I0920 16:43:54.708107 8307 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed from local cache
I0920 16:44:12.104421 8307 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed from cached tarball
I0920 16:44:12.104468 8307 cache.go:194] Successfully downloaded all kic artifacts
I0920 16:44:12.104498 8307 start.go:360] acquireMachinesLock for addons-877987: {Name:mk221d5c4555b6842e86454c467ee0d2d76e805a Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0920 16:44:12.104621 8307 start.go:364] duration metric: took 99.514µs to acquireMachinesLock for "addons-877987"
I0920 16:44:12.104651    8307 start.go:93] Provisioning new machine with config: &{Name:addons-877987 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-877987 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0920 16:44:12.104734 8307 start.go:125] createHost starting for "" (driver="docker")
I0920 16:44:12.107964 8307 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0920 16:44:12.108252 8307 start.go:159] libmachine.API.Create for "addons-877987" (driver="docker")
I0920 16:44:12.108294 8307 client.go:168] LocalClient.Create starting
I0920 16:44:12.108448 8307 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca.pem
I0920 16:44:12.391738 8307 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/cert.pem
I0920 16:44:12.662504 8307 cli_runner.go:164] Run: docker network inspect addons-877987 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0920 16:44:12.678013 8307 cli_runner.go:211] docker network inspect addons-877987 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0920 16:44:12.678098 8307 network_create.go:284] running [docker network inspect addons-877987] to gather additional debugging logs...
I0920 16:44:12.678129 8307 cli_runner.go:164] Run: docker network inspect addons-877987
W0920 16:44:12.692725 8307 cli_runner.go:211] docker network inspect addons-877987 returned with exit code 1
I0920 16:44:12.692759 8307 network_create.go:287] error running [docker network inspect addons-877987]: docker network inspect addons-877987: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-877987 not found
I0920 16:44:12.692772 8307 network_create.go:289] output of [docker network inspect addons-877987]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-877987 not found
** /stderr **
I0920 16:44:12.692871 8307 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0920 16:44:12.708657 8307 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0x40017f00e0}
I0920 16:44:12.708708 8307 network_create.go:124] attempt to create docker network addons-877987 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0920 16:44:12.708810 8307 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-877987 addons-877987
I0920 16:44:12.778583 8307 network_create.go:108] docker network addons-877987 192.168.49.0/24 created
I0920 16:44:12.778619 8307 kic.go:121] calculated static IP "192.168.49.2" for the "addons-877987" container
I0920 16:44:12.778704 8307 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0920 16:44:12.793536 8307 cli_runner.go:164] Run: docker volume create addons-877987 --label name.minikube.sigs.k8s.io=addons-877987 --label created_by.minikube.sigs.k8s.io=true
I0920 16:44:12.810527 8307 oci.go:103] Successfully created a docker volume addons-877987
I0920 16:44:12.810622 8307 cli_runner.go:164] Run: docker run --rm --name addons-877987-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-877987 --entrypoint /usr/bin/test -v addons-877987:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -d /var/lib
I0920 16:44:15.018703 8307 cli_runner.go:217] Completed: docker run --rm --name addons-877987-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-877987 --entrypoint /usr/bin/test -v addons-877987:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -d /var/lib: (2.20803358s)
I0920 16:44:15.018732 8307 oci.go:107] Successfully prepared a docker volume addons-877987
I0920 16:44:15.018753 8307 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0920 16:44:15.018772 8307 kic.go:194] Starting extracting preloaded images to volume ...
I0920 16:44:15.018839 8307 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19672-2235/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-877987:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -I lz4 -xf /preloaded.tar -C /extractDir
I0920 16:44:18.755755 8307 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19672-2235/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-arm64.tar.lz4:/preloaded.tar:ro -v addons-877987:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed -I lz4 -xf /preloaded.tar -C /extractDir: (3.736874567s)
I0920 16:44:18.755787 8307 kic.go:203] duration metric: took 3.737011924s to extract preloaded images to volume ...
W0920 16:44:18.755951 8307 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0920 16:44:18.756069 8307 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0920 16:44:18.807666 8307 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-877987 --name addons-877987 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-877987 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-877987 --network addons-877987 --ip 192.168.49.2 --volume addons-877987:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed
I0920 16:44:19.159816 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Running}}
I0920 16:44:19.187040 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:19.208953 8307 cli_runner.go:164] Run: docker exec addons-877987 stat /var/lib/dpkg/alternatives/iptables
I0920 16:44:19.275920 8307 oci.go:144] the created container "addons-877987" has a running status.
I0920 16:44:19.275952 8307 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa...
I0920 16:44:19.520474 8307 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0920 16:44:19.548325 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:19.569407 8307 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0920 16:44:19.569426 8307 kic_runner.go:114] Args: [docker exec --privileged addons-877987 chown docker:docker /home/docker/.ssh/authorized_keys]
I0920 16:44:19.656723 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:19.678500 8307 machine.go:93] provisionDockerMachine start ...
I0920 16:44:19.678592 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:19.703704 8307 main.go:141] libmachine: Using SSH client type: native
I0920 16:44:19.703990 8307 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413650] 0x415e90 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0920 16:44:19.704007 8307 main.go:141] libmachine: About to run SSH command:
hostname
I0920 16:44:19.704687 8307 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:55594->127.0.0.1:32768: read: connection reset by peer
I0920 16:44:22.837525 8307 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-877987
I0920 16:44:22.837592 8307 ubuntu.go:169] provisioning hostname "addons-877987"
I0920 16:44:22.837688 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:22.853963 8307 main.go:141] libmachine: Using SSH client type: native
I0920 16:44:22.854212 8307 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413650] 0x415e90 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0920 16:44:22.854224 8307 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-877987 && echo "addons-877987" | sudo tee /etc/hostname
I0920 16:44:22.997849 8307 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-877987
I0920 16:44:22.997936 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:23.014498 8307 main.go:141] libmachine: Using SSH client type: native
I0920 16:44:23.014745 8307 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413650] 0x415e90 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0920 16:44:23.014768 8307 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-877987' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-877987/g' /etc/hosts;
else
echo '127.0.1.1 addons-877987' | sudo tee -a /etc/hosts;
fi
fi
I0920 16:44:23.146103 8307 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0920 16:44:23.146129 8307 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19672-2235/.minikube CaCertPath:/home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19672-2235/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19672-2235/.minikube}
I0920 16:44:23.146151 8307 ubuntu.go:177] setting up certificates
I0920 16:44:23.146160 8307 provision.go:84] configureAuth start
I0920 16:44:23.146223 8307 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-877987
I0920 16:44:23.163229 8307 provision.go:143] copyHostCerts
I0920 16:44:23.163305 8307 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19672-2235/.minikube/ca.pem (1082 bytes)
I0920 16:44:23.163418 8307 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19672-2235/.minikube/cert.pem (1123 bytes)
I0920 16:44:23.163470 8307 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19672-2235/.minikube/key.pem (1679 bytes)
I0920 16:44:23.163514 8307 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19672-2235/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca-key.pem org=jenkins.addons-877987 san=[127.0.0.1 192.168.49.2 addons-877987 localhost minikube]
I0920 16:44:23.672518 8307 provision.go:177] copyRemoteCerts
I0920 16:44:23.672590 8307 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0920 16:44:23.672636 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:23.689850 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:23.782956 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0920 16:44:23.807505 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0920 16:44:23.831141 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0920 16:44:23.854940 8307 provision.go:87] duration metric: took 708.766629ms to configureAuth
I0920 16:44:23.855000 8307 ubuntu.go:193] setting minikube options for container-runtime
I0920 16:44:23.855197 8307 config.go:182] Loaded profile config "addons-877987": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0920 16:44:23.855294 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:23.872194 8307 main.go:141] libmachine: Using SSH client type: native
I0920 16:44:23.872450 8307 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413650] 0x415e90 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0920 16:44:23.872468 8307 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0920 16:44:24.006471 8307 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0920 16:44:24.006490 8307 ubuntu.go:71] root file system type: overlay
I0920 16:44:24.006595 8307 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0920 16:44:24.006666 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:24.033655 8307 main.go:141] libmachine: Using SSH client type: native
I0920 16:44:24.033922 8307 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413650] 0x415e90 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0920 16:44:24.034016 8307 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0920 16:44:24.178479 8307 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0920 16:44:24.178568 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:24.196231 8307 main.go:141] libmachine: Using SSH client type: native
I0920 16:44:24.196470 8307 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x413650] 0x415e90 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0920 16:44:24.196502 8307 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0920 16:44:24.959646 8307 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-19 14:24:16.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-20 16:44:24.170368569 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0920 16:44:24.959745 8307 machine.go:96] duration metric: took 5.281220375s to provisionDockerMachine
I0920 16:44:24.959800 8307 client.go:171] duration metric: took 12.851494838s to LocalClient.Create
I0920 16:44:24.959843 8307 start.go:167] duration metric: took 12.851591578s to libmachine.API.Create "addons-877987"
I0920 16:44:24.959867 8307 start.go:293] postStartSetup for "addons-877987" (driver="docker")
I0920 16:44:24.959912 8307 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0920 16:44:24.960000 8307 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0920 16:44:24.960069 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:24.979053 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:25.075552 8307 ssh_runner.go:195] Run: cat /etc/os-release
I0920 16:44:25.078673 8307 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0920 16:44:25.078710 8307 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0920 16:44:25.078722 8307 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0920 16:44:25.078729 8307 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0920 16:44:25.078740 8307 filesync.go:126] Scanning /home/jenkins/minikube-integration/19672-2235/.minikube/addons for local assets ...
I0920 16:44:25.078814 8307 filesync.go:126] Scanning /home/jenkins/minikube-integration/19672-2235/.minikube/files for local assets ...
I0920 16:44:25.078838 8307 start.go:296] duration metric: took 118.931939ms for postStartSetup
I0920 16:44:25.079152 8307 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-877987
I0920 16:44:25.096442 8307 profile.go:143] Saving config to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/config.json ...
I0920 16:44:25.096735 8307 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0920 16:44:25.096787 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:25.115105 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:25.207105 8307 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0920 16:44:25.211624 8307 start.go:128] duration metric: took 13.106874758s to createHost
I0920 16:44:25.211648 8307 start.go:83] releasing machines lock for "addons-877987", held for 13.107013493s
I0920 16:44:25.211738 8307 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-877987
I0920 16:44:25.227312 8307 ssh_runner.go:195] Run: cat /version.json
I0920 16:44:25.227366 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:25.227598 8307 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0920 16:44:25.227667 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:25.247976 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:25.250523 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:25.468285 8307 ssh_runner.go:195] Run: systemctl --version
I0920 16:44:25.472716 8307 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0920 16:44:25.477867 8307 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0920 16:44:25.503533 8307 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0920 16:44:25.503612 8307 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0920 16:44:25.533791 8307 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
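The find/sed pair above patches any loopback CNI config so it carries an explicit "name" field and a pinned cniVersion, then moves the bridge/podman configs aside so they cannot conflict with the CNI config minikube installs later. A rough Go equivalent of the loopback patch (sketch only; the file path is hypothetical and minikube performs this with sed over ssh as logged):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// patchLoopbackConf mirrors the sed one-liner from the log: it adds a
// "name" field to the loopback CNI config if missing and pins cniVersion
// to 1.0.0. Sketch only; the real patch is applied remotely with sed.
func patchLoopbackConf(path string) error {
	raw, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var conf map[string]interface{}
	if err := json.Unmarshal(raw, &conf); err != nil {
		return err
	}
	if _, ok := conf["name"]; !ok {
		conf["name"] = "loopback"
	}
	conf["cniVersion"] = "1.0.0"
	out, err := json.MarshalIndent(conf, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, out, 0o644)
}

func main() {
	// Hypothetical path; the log globs for /etc/cni/net.d/*loopback.conf*.
	if err := patchLoopbackConf("/etc/cni/net.d/200-loopback.conf"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```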
I0920 16:44:25.533865 8307 start.go:495] detecting cgroup driver to use...
I0920 16:44:25.533911 8307 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0920 16:44:25.534053 8307 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0920 16:44:25.550171 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0920 16:44:25.559861 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0920 16:44:25.569780 8307 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0920 16:44:25.569893 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0920 16:44:25.580226 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0920 16:44:25.590111 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0920 16:44:25.600464 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0920 16:44:25.610295 8307 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0920 16:44:25.619974 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0920 16:44:25.630047 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0920 16:44:25.639743 8307 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
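The sed edits above rewrite /etc/containerd/config.toml to match the detected "cgroupfs" driver: the sandbox image is pinned, SystemdCgroup is forced to false, the runc v2 runtime is selected, and conf_dir is pointed at /etc/cni/net.d. A Go sketch of just the SystemdCgroup edit (illustrative only; minikube applies it with sed over ssh as shown):

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// setSystemdCgroup flips the SystemdCgroup key in containerd's config.toml,
// the same effect as the `sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g'`
// command in the log. Pass "true" on hosts that use the systemd cgroup driver.
func setSystemdCgroup(path, value string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	re := regexp.MustCompile(`(?m)^(\s*)SystemdCgroup = .*$`)
	out := re.ReplaceAll(data, []byte("${1}SystemdCgroup = "+value))
	return os.WriteFile(path, out, 0o644)
}

func main() {
	// "cgroupfs" was detected on this host, so SystemdCgroup stays false.
	if err := setSystemdCgroup("/etc/containerd/config.toml", "false"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```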
I0920 16:44:25.649550 8307 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0920 16:44:25.658262 8307 crio.go:166] couldn't verify netfilter by "sudo sysctl net.bridge.bridge-nf-call-iptables" which might be okay. error: sudo sysctl net.bridge.bridge-nf-call-iptables: Process exited with status 255
stdout:
stderr:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
I0920 16:44:25.658402 8307 ssh_runner.go:195] Run: sudo modprobe br_netfilter
I0920 16:44:25.673675 8307 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0920 16:44:25.683512 8307 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0920 16:44:25.770052 8307 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0920 16:44:25.870831 8307 start.go:495] detecting cgroup driver to use...
I0920 16:44:25.870931 8307 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0920 16:44:25.871029 8307 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0920 16:44:25.884767 8307 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0920 16:44:25.884876 8307 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0920 16:44:25.898913 8307 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0920 16:44:25.919706 8307 ssh_runner.go:195] Run: which cri-dockerd
I0920 16:44:25.923983 8307 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0920 16:44:25.937745 8307 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0920 16:44:25.958469 8307 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0920 16:44:26.063067 8307 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0920 16:44:26.171944 8307 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0920 16:44:26.172110 8307 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0920 16:44:26.192870 8307 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0920 16:44:26.284487 8307 ssh_runner.go:195] Run: sudo systemctl restart docker
I0920 16:44:26.551982 8307 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0920 16:44:26.564432 8307 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0920 16:44:26.577900 8307 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0920 16:44:26.670746 8307 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0920 16:44:26.765560 8307 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0920 16:44:26.846464 8307 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0920 16:44:26.860334 8307 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0920 16:44:26.871273 8307 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0920 16:44:26.959281 8307 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0920 16:44:27.036759 8307 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0920 16:44:27.036906 8307 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0920 16:44:27.041400 8307 start.go:563] Will wait 60s for crictl version
I0920 16:44:27.041545 8307 ssh_runner.go:195] Run: which crictl
I0920 16:44:27.047153 8307 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0920 16:44:27.090884 8307 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.3.0
RuntimeApiVersion: v1
I0920 16:44:27.091022 8307 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0920 16:44:27.113743 8307 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0920 16:44:27.138966 8307 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.3.0 ...
I0920 16:44:27.139093 8307 cli_runner.go:164] Run: docker network inspect addons-877987 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0920 16:44:27.154810 8307 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0920 16:44:27.158520 8307 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
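The shell fragment above is an idempotent /etc/hosts update: any existing line for host.minikube.internal is filtered out, the fresh "192.168.49.1 host.minikube.internal" mapping is appended, and the result is copied back into place. A small Go sketch of the same idea (assumptions: local file access rather than the ssh/grep/cp pipeline used in the log):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// ensureHostsEntry reproduces the grep/echo/cp pipeline from the log: drop
// any existing line for the hostname, append a fresh "ip<TAB>hostname"
// entry, and write the file back. Sketch only (local file access, no sudo).
func ensureHostsEntry(hostsPath, ip, hostname string) error {
	data, err := os.ReadFile(hostsPath)
	if err != nil {
		return err
	}
	var kept []string
	for _, line := range strings.Split(strings.TrimRight(string(data), "\n"), "\n") {
		if strings.HasSuffix(line, "\t"+hostname) || strings.HasSuffix(line, " "+hostname) {
			continue // stale mapping, replaced below
		}
		kept = append(kept, line)
	}
	kept = append(kept, ip+"\t"+hostname)
	return os.WriteFile(hostsPath, []byte(strings.Join(kept, "\n")+"\n"), 0o644)
}

func main() {
	if err := ensureHostsEntry("/etc/hosts", "192.168.49.1", "host.minikube.internal"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```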
I0920 16:44:27.169854 8307 kubeadm.go:883] updating cluster {Name:addons-877987 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-877987 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0920 16:44:27.169970 8307 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0920 16:44:27.170024 8307 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0920 16:44:27.188920 8307 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0920 16:44:27.188946 8307 docker.go:615] Images already preloaded, skipping extraction
I0920 16:44:27.189011 8307 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0920 16:44:27.207090 8307 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0920 16:44:27.207114 8307 cache_images.go:84] Images are preloaded, skipping loading
I0920 16:44:27.207125 8307 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0920 16:44:27.207266 8307 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-877987 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-877987 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0920 16:44:27.207372 8307 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0920 16:44:27.249268 8307 cni.go:84] Creating CNI manager for ""
I0920 16:44:27.249308 8307 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0920 16:44:27.249320 8307 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0920 16:44:27.249341 8307 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-877987 NodeName:addons-877987 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0920 16:44:27.249506 8307 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "addons-877987"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
I0920 16:44:27.249597 8307 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0920 16:44:27.258639 8307 binaries.go:44] Found k8s binaries, skipping transfer
I0920 16:44:27.258712 8307 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0920 16:44:27.267363 8307 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0920 16:44:27.286346 8307 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0920 16:44:27.304368 8307 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0920 16:44:27.322250 8307 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0920 16:44:27.325692 8307 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0920 16:44:27.336217 8307 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0920 16:44:27.421986 8307 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0920 16:44:27.442966 8307 certs.go:68] Setting up /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987 for IP: 192.168.49.2
I0920 16:44:27.442986 8307 certs.go:194] generating shared ca certs ...
I0920 16:44:27.443001 8307 certs.go:226] acquiring lock for ca certs: {Name:mk539b11c006d047f7d221e4c2dcf26c06d5e779 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:27.443123 8307 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19672-2235/.minikube/ca.key
I0920 16:44:27.826406 8307 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19672-2235/.minikube/ca.crt ...
I0920 16:44:27.826440 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/ca.crt: {Name:mk5af13a70dac5b7e434eaf057dfe487d146bf5d Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:27.827044 8307 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19672-2235/.minikube/ca.key ...
I0920 16:44:27.827097 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/ca.key: {Name:mkc6071658ed474787d12f09fb370c2d8c3a8e62 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:27.827229 8307 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.key
I0920 16:44:28.376351 8307 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.crt ...
I0920 16:44:28.376384 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.crt: {Name:mkb80da8e44642552765b0422cd2ea11ccfb23b1 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:28.376607 8307 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.key ...
I0920 16:44:28.376622 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.key: {Name:mke01337e9b6efbb5075844049fc7a26db49de6b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:28.376708 8307 certs.go:256] generating profile certs ...
I0920 16:44:28.376770 8307 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/client.key
I0920 16:44:28.376796 8307 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/client.crt with IP's: []
I0920 16:44:29.534696 8307 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/client.crt ...
I0920 16:44:29.534730 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/client.crt: {Name:mk810d0e54dde39af38c3cd8fb6a8ae5e9408977 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:29.534925 8307 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/client.key ...
I0920 16:44:29.534941 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/client.key: {Name:mk89fab8c443f6b170b725ec9cd45b281b4c7e43 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:29.535027 8307 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.key.c6544f1a
I0920 16:44:29.535046 8307 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.crt.c6544f1a with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0920 16:44:29.930141 8307 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.crt.c6544f1a ...
I0920 16:44:29.930175 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.crt.c6544f1a: {Name:mk3b3b68436f509194f43d38146ae13f1765c13b Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:29.930396 8307 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.key.c6544f1a ...
I0920 16:44:29.930413 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.key.c6544f1a: {Name:mk7fbc7421b690435882ccc40bd022e33069a6e4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:29.930501 8307 certs.go:381] copying /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.crt.c6544f1a -> /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.crt
I0920 16:44:29.930581 8307 certs.go:385] copying /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.key.c6544f1a -> /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.key
I0920 16:44:29.930635 8307 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.key
I0920 16:44:29.930655 8307 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.crt with IP's: []
I0920 16:44:30.260423 8307 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.crt ...
I0920 16:44:30.260460 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.crt: {Name:mkd614d149f84bdbb7bd52fc02a5a988dcfbe503 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:30.260691 8307 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.key ...
I0920 16:44:30.260707 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.key: {Name:mk563c190898ad10a3e8202468da5e54def6a022 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:30.260916 8307 certs.go:484] found cert: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca-key.pem (1679 bytes)
I0920 16:44:30.260960 8307 certs.go:484] found cert: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/ca.pem (1082 bytes)
I0920 16:44:30.260989 8307 certs.go:484] found cert: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/cert.pem (1123 bytes)
I0920 16:44:30.261017 8307 certs.go:484] found cert: /home/jenkins/minikube-integration/19672-2235/.minikube/certs/key.pem (1679 bytes)
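The certs.go steps above build a shared self-signed minikubeCA plus per-profile client, apiserver and proxy-client certificates, with the apiserver cert carrying the service and node IPs (10.96.0.1, 127.0.0.1, 10.0.0.1, 192.168.49.2) as SANs. A stripped-down crypto/x509 sketch of generating such a self-signed CA (key type, size and validity here are assumptions, not minikube's actual crypto.go settings):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"os"
	"time"
)

func main() {
	// Key type/size and validity are assumptions for this sketch.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "minikubeCA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	// Self-signed: the template is its own parent.
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	_ = os.WriteFile("ca.crt", certPEM, 0o644)
	_ = os.WriteFile("ca.key", keyPEM, 0o600)
}
```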
I0920 16:44:30.261626 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0920 16:44:30.288919 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes)
I0920 16:44:30.314297 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0920 16:44:30.338941 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0920 16:44:30.364350 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0920 16:44:30.392057 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0920 16:44:30.422137 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0920 16:44:30.450623 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/profiles/addons-877987/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0920 16:44:30.474517 8307 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19672-2235/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0920 16:44:30.498533 8307 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0920 16:44:30.516058 8307 ssh_runner.go:195] Run: openssl version
I0920 16:44:30.521380 8307 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0920 16:44:30.530887 8307 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0920 16:44:30.534516 8307 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 20 16:44 /usr/share/ca-certificates/minikubeCA.pem
I0920 16:44:30.534587 8307 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0920 16:44:30.541519 8307 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
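The two commands above install the CA into the system trust store the way OpenSSL expects: the certificate is linked under /etc/ssl/certs as "<subject-hash>.0" (b5213941.0 here), which is the filename OpenSSL uses when looking a CA up by subject hash. A Go sketch that shells out to openssl the same way (illustrative; error handling kept minimal):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// linkCertByHash symlinks certPath into certsDir as "<subject-hash>.0",
// mirroring the `openssl x509 -hash -noout` plus `ln -fs` steps in the log.
func linkCertByHash(certPath, certsDir string) error {
	out, err := exec.Command("openssl", "x509", "-hash", "-noout", "-in", certPath).Output()
	if err != nil {
		return err
	}
	hash := strings.TrimSpace(string(out)) // e.g. "b5213941"
	link := filepath.Join(certsDir, hash+".0")
	_ = os.Remove(link) // emulate ln -f: replace any existing link
	return os.Symlink(certPath, link)
}

func main() {
	if err := linkCertByHash("/usr/share/ca-certificates/minikubeCA.pem", "/etc/ssl/certs"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```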
I0920 16:44:30.550612 8307 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0920 16:44:30.553737 8307 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0920 16:44:30.553784 8307 kubeadm.go:392] StartCluster: {Name:addons-877987 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726784731-19672@sha256:7f8c62ddb0100a5b958dd19c5b5478b8c7ef13da9a0a4d6c7d18f43544e0dbed Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-877987 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0920 16:44:30.553912 8307 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0920 16:44:30.569013 8307 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0920 16:44:30.577535 8307 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0920 16:44:30.586433 8307 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0920 16:44:30.586500 8307 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0920 16:44:30.595162 8307 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0920 16:44:30.595185 8307 kubeadm.go:157] found existing configuration files:
I0920 16:44:30.595240 8307 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0920 16:44:30.604356 8307 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0920 16:44:30.604433 8307 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0920 16:44:30.613105 8307 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0920 16:44:30.622052 8307 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0920 16:44:30.622119 8307 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0920 16:44:30.630763 8307 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0920 16:44:30.639510 8307 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0920 16:44:30.639604 8307 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0920 16:44:30.648285 8307 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0920 16:44:30.656900 8307 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0920 16:44:30.656966 8307 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0920 16:44:30.665336 8307 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0920 16:44:30.711423 8307 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0920 16:44:30.711737 8307 kubeadm.go:310] [preflight] Running pre-flight checks
I0920 16:44:30.734952 8307 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0920 16:44:30.735026 8307 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1070-aws
I0920 16:44:30.735065 8307 kubeadm.go:310] OS: Linux
I0920 16:44:30.735115 8307 kubeadm.go:310] CGROUPS_CPU: enabled
I0920 16:44:30.735166 8307 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0920 16:44:30.735217 8307 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0920 16:44:30.735267 8307 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0920 16:44:30.735317 8307 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0920 16:44:30.735369 8307 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0920 16:44:30.735436 8307 kubeadm.go:310] CGROUPS_PIDS: enabled
I0920 16:44:30.735488 8307 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0920 16:44:30.735542 8307 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0920 16:44:30.795132 8307 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0920 16:44:30.795323 8307 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0920 16:44:30.795479 8307 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0920 16:44:30.810672 8307 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0920 16:44:30.815601 8307 out.go:235] - Generating certificates and keys ...
I0920 16:44:30.815790 8307 kubeadm.go:310] [certs] Using existing ca certificate authority
I0920 16:44:30.815906 8307 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0920 16:44:30.999283 8307 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0920 16:44:31.570686 8307 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0920 16:44:31.792253 8307 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0920 16:44:32.367974 8307 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0920 16:44:32.831025 8307 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0920 16:44:32.831386 8307 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-877987 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0920 16:44:33.242865 8307 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0920 16:44:33.243212 8307 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-877987 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0920 16:44:33.676874 8307 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0920 16:44:34.184288 8307 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0920 16:44:34.458143 8307 kubeadm.go:310] [certs] Generating "sa" key and public key
I0920 16:44:34.458424 8307 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0920 16:44:34.783056 8307 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0920 16:44:34.918262 8307 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0920 16:44:35.298900 8307 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0920 16:44:36.537829 8307 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0920 16:44:36.961993 8307 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0920 16:44:36.962807 8307 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0920 16:44:36.967890 8307 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0920 16:44:36.970531 8307 out.go:235] - Booting up control plane ...
I0920 16:44:36.970643 8307 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0920 16:44:36.970733 8307 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0920 16:44:36.971341 8307 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0920 16:44:36.981636 8307 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0920 16:44:36.987729 8307 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0920 16:44:36.987789 8307 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0920 16:44:37.112429 8307 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0920 16:44:37.112587 8307 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0920 16:44:38.123155 8307 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 1.010677628s
I0920 16:44:38.123244 8307 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0920 16:44:45.129468 8307 kubeadm.go:310] [api-check] The API server is healthy after 7.004799001s
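The kubelet-check and api-check phases above simply poll health endpoints (the kubelet's http://127.0.0.1:10248/healthz, then the API server) until they answer 200 or the 4m0s ceiling is hit. A minimal Go polling sketch of that pattern (not kubeadm's implementation):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitHealthy polls a /healthz endpoint until it returns 200 OK or the
// deadline passes, roughly what the kubelet-check/api-check phases do.
func waitHealthy(url string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	client := &http.Client{Timeout: 2 * time.Second}
	for time.Now().Before(deadline) {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("%s not healthy within %s", url, timeout)
}

func main() {
	if err := waitHealthy("http://127.0.0.1:10248/healthz", 4*time.Minute); err != nil {
		fmt.Println(err)
	}
}
```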
I0920 16:44:45.192817 8307 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0920 16:44:45.710220 8307 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0920 16:44:45.733042 8307 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0920 16:44:45.733259 8307 kubeadm.go:310] [mark-control-plane] Marking the node addons-877987 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0920 16:44:45.743782 8307 kubeadm.go:310] [bootstrap-token] Using token: 9d72in.480gru1u9ujudilj
I0920 16:44:45.746276 8307 out.go:235] - Configuring RBAC rules ...
I0920 16:44:45.746424 8307 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0920 16:44:45.750868 8307 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0920 16:44:45.760064 8307 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0920 16:44:45.763708 8307 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0920 16:44:45.767663 8307 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0920 16:44:45.771474 8307 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0920 16:44:45.903922 8307 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0920 16:44:46.330245 8307 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0920 16:44:46.902979 8307 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0920 16:44:46.904158 8307 kubeadm.go:310]
I0920 16:44:46.904231 8307 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0920 16:44:46.904237 8307 kubeadm.go:310]
I0920 16:44:46.904333 8307 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0920 16:44:46.904349 8307 kubeadm.go:310]
I0920 16:44:46.904374 8307 kubeadm.go:310] mkdir -p $HOME/.kube
I0920 16:44:46.904452 8307 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0920 16:44:46.904508 8307 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0920 16:44:46.904520 8307 kubeadm.go:310]
I0920 16:44:46.904578 8307 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0920 16:44:46.904586 8307 kubeadm.go:310]
I0920 16:44:46.904633 8307 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0920 16:44:46.904641 8307 kubeadm.go:310]
I0920 16:44:46.904692 8307 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0920 16:44:46.904774 8307 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0920 16:44:46.904846 8307 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0920 16:44:46.904855 8307 kubeadm.go:310]
I0920 16:44:46.904938 8307 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0920 16:44:46.905018 8307 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0920 16:44:46.905027 8307 kubeadm.go:310]
I0920 16:44:46.905109 8307 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 9d72in.480gru1u9ujudilj \
I0920 16:44:46.905214 8307 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:3d039dfc643410e269f96757eff51180894f5dc32e113f840efb2336fc2b49fa \
I0920 16:44:46.905238 8307 kubeadm.go:310] --control-plane
I0920 16:44:46.905243 8307 kubeadm.go:310]
I0920 16:44:46.905326 8307 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0920 16:44:46.905330 8307 kubeadm.go:310]
I0920 16:44:46.905417 8307 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token 9d72in.480gru1u9ujudilj \
I0920 16:44:46.905522 8307 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:3d039dfc643410e269f96757eff51180894f5dc32e113f840efb2336fc2b49fa
I0920 16:44:46.908253 8307 kubeadm.go:310] W0920 16:44:30.707226 1825 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0920 16:44:46.908552 8307 kubeadm.go:310] W0920 16:44:30.708784 1825 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0920 16:44:46.908771 8307 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1070-aws\n", err: exit status 1
I0920 16:44:46.908887 8307 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0920 16:44:46.908912 8307 cni.go:84] Creating CNI manager for ""
I0920 16:44:46.908929 8307 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0920 16:44:46.913103 8307 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0920 16:44:46.915405 8307 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0920 16:44:46.924333 8307 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0920 16:44:46.941664 8307 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0920 16:44:46.941787 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:46.941867 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-877987 minikube.k8s.io/updated_at=2024_09_20T16_44_46_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=0626f22cf0d915d75e291a5bce701f94395056e1 minikube.k8s.io/name=addons-877987 minikube.k8s.io/primary=true
I0920 16:44:47.121172 8307 ops.go:34] apiserver oom_adj: -16
I0920 16:44:47.121342 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:47.621500 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:48.121416 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:48.622332 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:49.121672 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:49.622390 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:50.122224 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:50.622126 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:51.121731 8307 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0920 16:44:51.235314 8307 kubeadm.go:1113] duration metric: took 4.293563606s to wait for elevateKubeSystemPrivileges
I0920 16:44:51.235347 8307 kubeadm.go:394] duration metric: took 20.68156773s to StartCluster
I0920 16:44:51.235366 8307 settings.go:142] acquiring lock: {Name:mk231bf5a5cfcfec5102639d93468a1e4a41c89f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:51.235493 8307 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19672-2235/kubeconfig
I0920 16:44:51.235896 8307 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19672-2235/kubeconfig: {Name:mk389b7f7c7d441a0f49101972b4f99c06538341 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0920 16:44:51.236093 8307 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0920 16:44:51.236108 8307 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0920 16:44:51.236343 8307 config.go:182] Loaded profile config "addons-877987": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0920 16:44:51.236374 8307 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0920 16:44:51.236458 8307 addons.go:69] Setting yakd=true in profile "addons-877987"
I0920 16:44:51.236474 8307 addons.go:234] Setting addon yakd=true in "addons-877987"
I0920 16:44:51.236505 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.236958 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.237280 8307 addons.go:69] Setting inspektor-gadget=true in profile "addons-877987"
I0920 16:44:51.237302 8307 addons.go:234] Setting addon inspektor-gadget=true in "addons-877987"
I0920 16:44:51.237326 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.237748 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.237975 8307 addons.go:69] Setting metrics-server=true in profile "addons-877987"
I0920 16:44:51.237994 8307 addons.go:234] Setting addon metrics-server=true in "addons-877987"
I0920 16:44:51.238018 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.238566 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.242006 8307 addons.go:69] Setting cloud-spanner=true in profile "addons-877987"
I0920 16:44:51.242086 8307 addons.go:234] Setting addon cloud-spanner=true in "addons-877987"
I0920 16:44:51.242135 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.242576 8307 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-877987"
I0920 16:44:51.242594 8307 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-877987"
I0920 16:44:51.242614 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.243009 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243511 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243726 8307 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-877987"
I0920 16:44:51.264444 8307 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-877987"
I0920 16:44:51.264515 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.265007 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243735 8307 addons.go:69] Setting default-storageclass=true in profile "addons-877987"
I0920 16:44:51.277458 8307 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-877987"
I0920 16:44:51.277828 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243739 8307 addons.go:69] Setting gcp-auth=true in profile "addons-877987"
I0920 16:44:51.294716 8307 mustload.go:65] Loading cluster: addons-877987
I0920 16:44:51.294913 8307 config.go:182] Loaded profile config "addons-877987": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0920 16:44:51.295173 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243743 8307 addons.go:69] Setting ingress=true in profile "addons-877987"
I0920 16:44:51.306370 8307 addons.go:234] Setting addon ingress=true in "addons-877987"
I0920 16:44:51.306422 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.306919 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243747 8307 addons.go:69] Setting ingress-dns=true in profile "addons-877987"
I0920 16:44:51.330411 8307 addons.go:234] Setting addon ingress-dns=true in "addons-877987"
I0920 16:44:51.330461 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.330943 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.243773 8307 out.go:177] * Verifying Kubernetes components...
I0920 16:44:51.244150 8307 addons.go:69] Setting volcano=true in profile "addons-877987"
I0920 16:44:51.358608 8307 addons.go:234] Setting addon volcano=true in "addons-877987"
I0920 16:44:51.358650 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.360048 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.244160 8307 addons.go:69] Setting registry=true in profile "addons-877987"
I0920 16:44:51.364270 8307 addons.go:234] Setting addon registry=true in "addons-877987"
I0920 16:44:51.364315 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.364798 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.244164 8307 addons.go:69] Setting storage-provisioner=true in profile "addons-877987"
I0920 16:44:51.382232 8307 addons.go:234] Setting addon storage-provisioner=true in "addons-877987"
I0920 16:44:51.382269 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.382866 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.244168 8307 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-877987"
I0920 16:44:51.398425 8307 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-877987"
I0920 16:44:51.398756 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.244802 8307 addons.go:69] Setting volumesnapshots=true in profile "addons-877987"
I0920 16:44:51.407614 8307 addons.go:234] Setting addon volumesnapshots=true in "addons-877987"
I0920 16:44:51.407649 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.408292 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.410619 8307 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0920 16:44:51.413489 8307 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0920 16:44:51.414389 8307 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0920 16:44:51.438899 8307 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0920 16:44:51.438982 8307 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0920 16:44:51.439191 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.455920 8307 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0920 16:44:51.455942 8307 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0920 16:44:51.456006 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.478383 8307 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.23
I0920 16:44:51.478497 8307 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0920 16:44:51.480679 8307 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0920 16:44:51.480822 8307 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0920 16:44:51.480832 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0920 16:44:51.480892 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.485884 8307 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0920 16:44:51.490404 8307 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0920 16:44:51.524246 8307 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0920 16:44:51.524494 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.540524 8307 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0920 16:44:51.540543 8307 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0920 16:44:51.540644 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.541272 8307 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0920 16:44:51.546885 8307 addons.go:234] Setting addon default-storageclass=true in "addons-877987"
I0920 16:44:51.549056 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.549481 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.552001 8307 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0920 16:44:51.562131 8307 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0920 16:44:51.562151 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0920 16:44:51.562215 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.578424 8307 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0920 16:44:51.594609 8307 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0920 16:44:51.597848 8307 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-877987"
I0920 16:44:51.599353 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:51.599847 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:51.598025 8307 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0920 16:44:51.598039 8307 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0920 16:44:51.599297 8307 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0920 16:44:51.601560 8307 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0920 16:44:51.604347 8307 out.go:177] - Using image docker.io/registry:2.8.3
I0920 16:44:51.608185 8307 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0920 16:44:51.608339 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.628955 8307 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0920 16:44:51.633224 8307 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0920 16:44:51.635111 8307 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0920 16:44:51.638096 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0920 16:44:51.638126 8307 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0920 16:44:51.638193 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.645576 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.650553 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.654079 8307 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0920 16:44:51.654244 8307 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0920 16:44:51.654654 8307 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0920 16:44:51.654677 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0920 16:44:51.654744 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.668656 8307 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0920 16:44:51.668677 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0920 16:44:51.668732 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.671484 8307 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0920 16:44:51.672081 8307 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0920 16:44:51.676310 8307 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0920 16:44:51.676342 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0920 16:44:51.676406 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.686757 8307 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0920 16:44:51.688858 8307 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0920 16:44:51.688881 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0920 16:44:51.688948 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.713667 8307 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0920 16:44:51.713690 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0920 16:44:51.713751 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.718405 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.719412 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.724073 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.743004 8307 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0920 16:44:51.743023 8307 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0920 16:44:51.743448 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.761305 8307 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0920 16:44:51.803002 8307 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0920 16:44:51.809499 8307 out.go:177] - Using image docker.io/busybox:stable
I0920 16:44:51.816468 8307 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0920 16:44:51.816502 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0920 16:44:51.816565 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:51.820786 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.828810 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.850946 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.852024 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.853262 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.874512 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.875196 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.888298 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.905351 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:51.921124 8307 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0920 16:44:52.475990 8307 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0920 16:44:52.476060 8307 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0920 16:44:52.511363 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0920 16:44:52.556877 8307 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0920 16:44:52.556954 8307 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0920 16:44:52.682242 8307 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0920 16:44:52.682268 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0920 16:44:52.893314 8307 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0920 16:44:52.893338 8307 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0920 16:44:52.948650 8307 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0920 16:44:52.948677 8307 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0920 16:44:52.967605 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0920 16:44:52.967632 8307 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0920 16:44:53.097049 8307 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0920 16:44:53.097075 8307 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0920 16:44:53.133697 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0920 16:44:53.135729 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0920 16:44:53.139791 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0920 16:44:53.146777 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0920 16:44:53.150850 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0920 16:44:53.176005 8307 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0920 16:44:53.176045 8307 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0920 16:44:53.204782 8307 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0920 16:44:53.204821 8307 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0920 16:44:53.211275 8307 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0920 16:44:53.211304 8307 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0920 16:44:53.226801 8307 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0920 16:44:53.226829 8307 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0920 16:44:53.244750 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0920 16:44:53.245408 8307 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0920 16:44:53.245429 8307 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0920 16:44:53.252937 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0920 16:44:53.263386 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0920 16:44:53.263427 8307 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0920 16:44:53.360075 8307 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0920 16:44:53.360100 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0920 16:44:53.390826 8307 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0920 16:44:53.390853 8307 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0920 16:44:53.394175 8307 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0920 16:44:53.394202 8307 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0920 16:44:53.418231 8307 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0920 16:44:53.418259 8307 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0920 16:44:53.490112 8307 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0920 16:44:53.490147 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0920 16:44:53.525667 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0920 16:44:53.543482 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0920 16:44:53.543522 8307 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0920 16:44:53.608975 8307 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0920 16:44:53.609018 8307 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0920 16:44:53.626880 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0920 16:44:53.668730 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0920 16:44:53.668772 8307 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0920 16:44:53.704424 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0920 16:44:53.793237 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0920 16:44:53.793269 8307 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0920 16:44:53.836682 8307 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (2.075335052s)
I0920 16:44:53.836713 8307 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0920 16:44:53.837729 8307 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.916582333s)
I0920 16:44:53.838465 8307 node_ready.go:35] waiting up to 6m0s for node "addons-877987" to be "Ready" ...
I0920 16:44:53.843719 8307 node_ready.go:49] node "addons-877987" has status "Ready":"True"
I0920 16:44:53.843746 8307 node_ready.go:38] duration metric: took 5.256118ms for node "addons-877987" to be "Ready" ...
I0920 16:44:53.843756 8307 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0920 16:44:53.853678 8307 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-s4fhl" in "kube-system" namespace to be "Ready" ...
I0920 16:44:53.853965 8307 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0920 16:44:53.854095 8307 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0920 16:44:54.068073 8307 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0920 16:44:54.068149 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0920 16:44:54.262422 8307 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0920 16:44:54.262496 8307 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0920 16:44:54.317710 8307 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0920 16:44:54.317773 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0920 16:44:54.340644 8307 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-877987" context rescaled to 1 replicas
I0920 16:44:54.364176 8307 pod_ready.go:93] pod "coredns-7c65d6cfc9-s4fhl" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:54.364250 8307 pod_ready.go:82] duration metric: took 510.260504ms for pod "coredns-7c65d6cfc9-s4fhl" in "kube-system" namespace to be "Ready" ...
I0920 16:44:54.364276 8307 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-v86mg" in "kube-system" namespace to be "Ready" ...
I0920 16:44:54.370505 8307 pod_ready.go:93] pod "coredns-7c65d6cfc9-v86mg" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:54.370576 8307 pod_ready.go:82] duration metric: took 6.278163ms for pod "coredns-7c65d6cfc9-v86mg" in "kube-system" namespace to be "Ready" ...
I0920 16:44:54.370604 8307 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:54.376457 8307 pod_ready.go:93] pod "etcd-addons-877987" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:54.376527 8307 pod_ready.go:82] duration metric: took 5.902408ms for pod "etcd-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:54.376553 8307 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:54.411380 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0920 16:44:54.544026 8307 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0920 16:44:54.544109 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0920 16:44:54.691608 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0920 16:44:54.842676 8307 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0920 16:44:54.842747 8307 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0920 16:44:55.297198 8307 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0920 16:44:55.297272 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0920 16:44:56.019062 8307 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0920 16:44:56.019092 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0920 16:44:56.382727 8307 pod_ready.go:103] pod "kube-apiserver-addons-877987" in "kube-system" namespace has status "Ready":"False"
I0920 16:44:56.460998 8307 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0920 16:44:56.461036 8307 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0920 16:44:57.028794 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0920 16:44:57.384220 8307 pod_ready.go:93] pod "kube-apiserver-addons-877987" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:57.384250 8307 pod_ready.go:82] duration metric: took 3.007675895s for pod "kube-apiserver-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.384263 8307 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.393540 8307 pod_ready.go:93] pod "kube-controller-manager-addons-877987" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:57.393567 8307 pod_ready.go:82] duration metric: took 9.295462ms for pod "kube-controller-manager-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.393579 8307 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-hxdck" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.443776 8307 pod_ready.go:93] pod "kube-proxy-hxdck" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:57.443803 8307 pod_ready.go:82] duration metric: took 50.216182ms for pod "kube-proxy-hxdck" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.443815 8307 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.843454 8307 pod_ready.go:93] pod "kube-scheduler-addons-877987" in "kube-system" namespace has status "Ready":"True"
I0920 16:44:57.843499 8307 pod_ready.go:82] duration metric: took 399.67668ms for pod "kube-scheduler-addons-877987" in "kube-system" namespace to be "Ready" ...
I0920 16:44:57.843509 8307 pod_ready.go:39] duration metric: took 3.999740974s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0920 16:44:57.843528 8307 api_server.go:52] waiting for apiserver process to appear ...
I0920 16:44:57.843607 8307 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0920 16:44:58.557797 8307 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0920 16:44:58.557950 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:58.586258 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:44:59.631435 8307 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0920 16:44:59.856223 8307 addons.go:234] Setting addon gcp-auth=true in "addons-877987"
I0920 16:44:59.856323 8307 host.go:66] Checking if "addons-877987" exists ...
I0920 16:44:59.856841 8307 cli_runner.go:164] Run: docker container inspect addons-877987 --format={{.State.Status}}
I0920 16:44:59.882497 8307 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0920 16:44:59.882559 8307 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-877987
I0920 16:44:59.912994 8307 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19672-2235/.minikube/machines/addons-877987/id_rsa Username:docker}
I0920 16:45:01.778665 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (9.26722825s)
I0920 16:45:01.778708 8307 addons.go:475] Verifying addon ingress=true in "addons-877987"
I0920 16:45:01.778758 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (8.644961375s)
I0920 16:45:01.781503 8307 out.go:177] * Verifying ingress addon...
I0920 16:45:01.784619 8307 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0920 16:45:01.790573 8307 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0920 16:45:01.790599 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:02.332055 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:02.839504 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:03.289301 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:03.493444 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (10.357668781s)
I0920 16:45:03.493513 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (10.353685028s)
I0920 16:45:03.493575 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (10.346774548s)
I0920 16:45:03.493764 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (10.342889653s)
I0920 16:45:03.493940 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (10.249163312s)
I0920 16:45:03.494005 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (10.241043057s)
I0920 16:45:03.494081 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (9.968389219s)
I0920 16:45:03.494095 8307 addons.go:475] Verifying addon metrics-server=true in "addons-877987"
I0920 16:45:03.494135 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (9.867230416s)
I0920 16:45:03.494149 8307 addons.go:475] Verifying addon registry=true in "addons-877987"
I0920 16:45:03.494454 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (9.790001206s)
I0920 16:45:03.494596 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.083133722s)
W0920 16:45:03.494621 8307 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0920 16:45:03.494650 8307 retry.go:31] will retry after 215.838179ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0920 16:45:03.494718 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (8.803034959s)
I0920 16:45:03.497613 8307 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-877987 service yakd-dashboard -n yakd-dashboard
I0920 16:45:03.497714 8307 out.go:177] * Verifying registry addon...
I0920 16:45:03.503497 8307 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0920 16:45:03.520155 8307 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0920 16:45:03.520185 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
W0920 16:45:03.552565 8307 out.go:270] ! Enabling 'default-storageclass' returned an error: running callbacks: [Error making standard the default storage class: Error while marking storage class local-path as non-default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
I0920 16:45:03.711400 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0920 16:45:03.801438 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:04.010911 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:04.289486 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:04.527349 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:04.765533 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (7.736675618s)
I0920 16:45:04.765579 8307 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-877987"
I0920 16:45:04.765784 8307 ssh_runner.go:235] Completed: sudo pgrep -xnf kube-apiserver.*minikube.*: (6.922158431s)
I0920 16:45:04.765869 8307 api_server.go:72] duration metric: took 13.529739125s to wait for apiserver process to appear ...
I0920 16:45:04.765891 8307 api_server.go:88] waiting for apiserver healthz status ...
I0920 16:45:04.765909 8307 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0920 16:45:04.765997 8307 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (4.883474595s)
I0920 16:45:04.768694 8307 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0920 16:45:04.768768 8307 out.go:177] * Verifying csi-hostpath-driver addon...
I0920 16:45:04.770990 8307 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0920 16:45:04.771993 8307 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0920 16:45:04.773870 8307 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0920 16:45:04.773892 8307 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0920 16:45:04.791355 8307 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0920 16:45:04.792998 8307 api_server.go:141] control plane version: v1.31.1
I0920 16:45:04.794523 8307 api_server.go:131] duration metric: took 28.624631ms to wait for apiserver health ...
I0920 16:45:04.794574 8307 system_pods.go:43] waiting for kube-system pods to appear ...
I0920 16:45:04.794264 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:04.794486 8307 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0920 16:45:04.794828 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:04.805767 8307 system_pods.go:59] 17 kube-system pods found
I0920 16:45:04.805860 8307 system_pods.go:61] "coredns-7c65d6cfc9-v86mg" [ea32d161-0a4e-45c3-a5cc-6ae8fd180f7d] Running
I0920 16:45:04.805887 8307 system_pods.go:61] "csi-hostpath-attacher-0" [1f75974e-07d7-4a96-8e80-0b65f501953f] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0920 16:45:04.805926 8307 system_pods.go:61] "csi-hostpath-resizer-0" [19a86aec-4fe1-4f1b-8860-193df89cac24] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0920 16:45:04.805959 8307 system_pods.go:61] "csi-hostpathplugin-zzsqz" [d4342400-11b5-4f45-93db-90e73a576254] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0920 16:45:04.805985 8307 system_pods.go:61] "etcd-addons-877987" [7efeac93-eac9-4e6e-ba04-91346c442ea5] Running
I0920 16:45:04.806012 8307 system_pods.go:61] "kube-apiserver-addons-877987" [bbd58a20-13fc-4b63-9b17-33ce089ae741] Running
I0920 16:45:04.806043 8307 system_pods.go:61] "kube-controller-manager-addons-877987" [65055957-39c9-45d0-b5dd-bbac2ff32526] Running
I0920 16:45:04.806071 8307 system_pods.go:61] "kube-ingress-dns-minikube" [0af9cd0c-aaef-4ff9-98ee-3a5c49360681] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0920 16:45:04.806103 8307 system_pods.go:61] "kube-proxy-hxdck" [9dc379c3-eb77-443b-a7fd-47c094a1b18a] Running
I0920 16:45:04.806128 8307 system_pods.go:61] "kube-scheduler-addons-877987" [c15278e1-2255-408e-a68a-4e23ef4b7129] Running
I0920 16:45:04.806181 8307 system_pods.go:61] "metrics-server-84c5f94fbc-gmqh2" [f6899345-ec86-427c-9cdd-46f043d24818] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0920 16:45:04.806206 8307 system_pods.go:61] "nvidia-device-plugin-daemonset-wrczs" [afc95ef0-9c2a-4b80-a5c8-3df87415fdcc] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
I0920 16:45:04.806243 8307 system_pods.go:61] "registry-66c9cd494c-lmt9d" [2a3a6aaa-b147-4517-bdc2-529c58ed2d26] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0920 16:45:04.806272 8307 system_pods.go:61] "registry-proxy-2wp2r" [c8ba5e64-c35c-4fdb-8dfb-ede028619b44] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0920 16:45:04.806294 8307 system_pods.go:61] "snapshot-controller-56fcc65765-j8xlq" [61a890d1-ae33-4878-b779-02d606e1fe0d] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0920 16:45:04.806419 8307 system_pods.go:61] "snapshot-controller-56fcc65765-wmcvm" [026cd01c-d15b-4fb6-831d-db609208af92] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0920 16:45:04.806462 8307 system_pods.go:61] "storage-provisioner" [e3eb2ce0-20de-46a5-8c65-043a2623eb44] Running
I0920 16:45:04.806485 8307 system_pods.go:74] duration metric: took 11.880539ms to wait for pod list to return data ...
I0920 16:45:04.806509 8307 default_sa.go:34] waiting for default service account to be created ...
I0920 16:45:04.809500 8307 default_sa.go:45] found service account: "default"
I0920 16:45:04.809561 8307 default_sa.go:55] duration metric: took 3.020877ms for default service account to be created ...
I0920 16:45:04.809584 8307 system_pods.go:116] waiting for k8s-apps to be running ...
I0920 16:45:04.819052 8307 system_pods.go:86] 17 kube-system pods found
I0920 16:45:04.819131 8307 system_pods.go:89] "coredns-7c65d6cfc9-v86mg" [ea32d161-0a4e-45c3-a5cc-6ae8fd180f7d] Running
I0920 16:45:04.819158 8307 system_pods.go:89] "csi-hostpath-attacher-0" [1f75974e-07d7-4a96-8e80-0b65f501953f] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0920 16:45:04.819185 8307 system_pods.go:89] "csi-hostpath-resizer-0" [19a86aec-4fe1-4f1b-8860-193df89cac24] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0920 16:45:04.819237 8307 system_pods.go:89] "csi-hostpathplugin-zzsqz" [d4342400-11b5-4f45-93db-90e73a576254] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0920 16:45:04.819257 8307 system_pods.go:89] "etcd-addons-877987" [7efeac93-eac9-4e6e-ba04-91346c442ea5] Running
I0920 16:45:04.819283 8307 system_pods.go:89] "kube-apiserver-addons-877987" [bbd58a20-13fc-4b63-9b17-33ce089ae741] Running
I0920 16:45:04.819318 8307 system_pods.go:89] "kube-controller-manager-addons-877987" [65055957-39c9-45d0-b5dd-bbac2ff32526] Running
I0920 16:45:04.819343 8307 system_pods.go:89] "kube-ingress-dns-minikube" [0af9cd0c-aaef-4ff9-98ee-3a5c49360681] Pending / Ready:ContainersNotReady (containers with unready status: [minikube-ingress-dns]) / ContainersReady:ContainersNotReady (containers with unready status: [minikube-ingress-dns])
I0920 16:45:04.819374 8307 system_pods.go:89] "kube-proxy-hxdck" [9dc379c3-eb77-443b-a7fd-47c094a1b18a] Running
I0920 16:45:04.819405 8307 system_pods.go:89] "kube-scheduler-addons-877987" [c15278e1-2255-408e-a68a-4e23ef4b7129] Running
I0920 16:45:04.819431 8307 system_pods.go:89] "metrics-server-84c5f94fbc-gmqh2" [f6899345-ec86-427c-9cdd-46f043d24818] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0920 16:45:04.819459 8307 system_pods.go:89] "nvidia-device-plugin-daemonset-wrczs" [afc95ef0-9c2a-4b80-a5c8-3df87415fdcc] Pending / Ready:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr]) / ContainersReady:ContainersNotReady (containers with unready status: [nvidia-device-plugin-ctr])
I0920 16:45:04.819488 8307 system_pods.go:89] "registry-66c9cd494c-lmt9d" [2a3a6aaa-b147-4517-bdc2-529c58ed2d26] Pending / Ready:ContainersNotReady (containers with unready status: [registry]) / ContainersReady:ContainersNotReady (containers with unready status: [registry])
I0920 16:45:04.819520 8307 system_pods.go:89] "registry-proxy-2wp2r" [c8ba5e64-c35c-4fdb-8dfb-ede028619b44] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0920 16:45:04.819550 8307 system_pods.go:89] "snapshot-controller-56fcc65765-j8xlq" [61a890d1-ae33-4878-b779-02d606e1fe0d] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0920 16:45:04.819576 8307 system_pods.go:89] "snapshot-controller-56fcc65765-wmcvm" [026cd01c-d15b-4fb6-831d-db609208af92] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0920 16:45:04.819600 8307 system_pods.go:89] "storage-provisioner" [e3eb2ce0-20de-46a5-8c65-043a2623eb44] Running
I0920 16:45:04.819636 8307 system_pods.go:126] duration metric: took 10.032735ms to wait for k8s-apps to be running ...
I0920 16:45:04.819663 8307 system_svc.go:44] waiting for kubelet service to be running ....
I0920 16:45:04.819760 8307 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0920 16:45:04.913086 8307 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0920 16:45:04.913151 8307 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0920 16:45:05.002422 8307 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0920 16:45:05.002488 8307 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0920 16:45:05.007462 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:05.066265 8307 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0920 16:45:05.277630 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:05.289572 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:05.507831 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:05.777097 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:05.789276 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:06.010395 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:06.278591 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:06.290324 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:06.293420 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.581970119s)
I0920 16:45:06.293554 8307 ssh_runner.go:235] Completed: sudo systemctl is-active --quiet service kubelet: (1.473766655s)
I0920 16:45:06.293608 8307 system_svc.go:56] duration metric: took 1.473942471s WaitForService to wait for kubelet
I0920 16:45:06.293641 8307 kubeadm.go:582] duration metric: took 15.057502251s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0920 16:45:06.293683 8307 node_conditions.go:102] verifying NodePressure condition ...
I0920 16:45:06.299431 8307 node_conditions.go:122] node storage ephemeral capacity is 203034800Ki
I0920 16:45:06.299514 8307 node_conditions.go:123] node cpu capacity is 2
I0920 16:45:06.299543 8307 node_conditions.go:105] duration metric: took 5.835406ms to run NodePressure ...
I0920 16:45:06.299584 8307 start.go:241] waiting for startup goroutines ...
I0920 16:45:06.481505 8307 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.41510123s)
I0920 16:45:06.485045 8307 addons.go:475] Verifying addon gcp-auth=true in "addons-877987"
I0920 16:45:06.488290 8307 out.go:177] * Verifying gcp-auth addon...
I0920 16:45:06.491231 8307 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0920 16:45:06.498106 8307 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0920 16:45:06.601339 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:06.780453 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:06.790215 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:07.006880 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:07.277277 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:07.289553 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:07.507465 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:07.777270 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:07.793294 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:08.007647 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:08.277785 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:08.289211 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:08.507197 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:08.777840 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:08.788895 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:09.007315 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:09.277863 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:09.289078 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:09.507647 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:09.777095 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:09.789998 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:10.008349 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:10.277878 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:10.289485 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:10.507917 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:10.779792 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:10.789271 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:11.007052 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:11.277148 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:11.289531 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:11.507110 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:11.777872 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:11.789504 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:12.010460 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:12.278442 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:12.289210 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:12.507581 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:12.777180 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:12.789098 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:13.007908 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:13.281452 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:13.289398 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:13.596340 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:13.777637 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:13.789877 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:14.008153 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:14.278793 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:14.291032 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:14.507821 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:14.777983 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:14.789287 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:15.007567 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:15.276512 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:15.289160 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:15.507332 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:15.778543 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:15.790003 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:16.008521 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:16.277252 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:16.289379 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:16.508031 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:16.777732 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:16.789477 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:17.007499 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:17.278640 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:17.288617 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:17.507681 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:17.777254 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:17.788753 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:18.007320 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:18.277562 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:18.290109 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:18.507480 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:18.778264 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:18.789919 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:19.007129 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:19.276661 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:19.289287 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:19.508894 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:19.778764 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:19.789515 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:20.007413 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:20.298139 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:20.299443 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:20.510175 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:20.777777 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:20.789356 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:21.007223 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:21.278806 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:21.289408 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:21.508736 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:21.777465 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:21.789672 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:22.007657 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:22.277135 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:22.288691 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:22.508210 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:22.776973 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:22.789992 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:23.007746 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:23.277499 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:23.289916 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:23.507338 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:23.777885 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:23.789876 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:24.007355 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:24.277154 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:24.289457 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:24.506989 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:24.777128 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:24.789167 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:25.007517 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:25.278364 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:25.290142 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:25.507788 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:25.777676 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:25.789289 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:26.007822 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:26.276932 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:26.290162 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:26.507659 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:26.777466 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:26.790457 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:27.007708 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:27.278403 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:27.289843 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:27.507163 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0920 16:45:27.777578 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:27.793746 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:28.007117 8307 kapi.go:107] duration metric: took 24.503616984s to wait for kubernetes.io/minikube-addons=registry ...
I0920 16:45:28.276787 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:28.288460 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:28.777894 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:28.789981 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:29.276984 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:29.288924 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:29.777724 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:29.790224 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:30.277451 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:30.290842 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:30.776846 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:30.789167 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:31.276623 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:31.289346 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:31.777897 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:31.789175 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:32.276799 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:32.288875 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:32.778423 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:32.791172 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:33.278641 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:33.297129 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:33.785400 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:33.788996 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:34.279663 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:34.289832 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:34.780206 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:34.789532 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:35.280833 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:35.293528 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:35.778059 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:35.789397 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:36.276984 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:36.289229 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:36.777161 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:36.789074 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:37.277003 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:37.292096 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:37.777174 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:37.878379 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:38.277256 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:38.288901 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:38.776973 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:38.789186 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:39.277601 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:39.289159 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:39.778730 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:39.789424 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:40.277671 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:40.290446 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:40.777758 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:40.789130 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:41.277674 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:41.291854 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:41.777693 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:41.792206 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:42.293586 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:42.297483 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:42.776524 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:42.791014 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:43.277141 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:43.289062 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:43.777724 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:43.789653 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:44.277317 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:44.289609 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:44.776581 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:44.789899 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:45.314028 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:45.315680 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:45.777343 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:45.789262 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:46.279196 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:46.290912 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:46.776921 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:46.789166 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:47.277544 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:47.289290 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:47.798172 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:47.799100 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:48.277466 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:48.289682 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:48.779018 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:48.789756 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:49.276971 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:49.292509 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:49.777680 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:49.788670 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:50.277035 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:50.289332 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:50.777597 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:50.789985 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:51.277943 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:51.290517 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:51.778953 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:51.788926 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:52.279316 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:52.291471 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:52.777729 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:52.789560 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:53.277245 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:53.289305 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:53.776628 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:53.788625 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:54.277144 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:54.289494 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:54.777125 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:54.789452 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:55.277360 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:55.289497 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:55.777674 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:55.789740 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:56.277296 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:56.289187 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:56.776043 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:56.789276 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:57.276224 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:57.289379 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:57.776911 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:57.788647 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:58.276768 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:58.288816 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:58.777078 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:58.788954 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:59.280992 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:59.288903 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:45:59.777737 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:45:59.789582 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:00.288617 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0920 16:46:00.327321 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:00.777478 8307 kapi.go:107] duration metric: took 56.005480505s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0920 16:46:00.790429 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:01.289726 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:01.788779 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:02.290224 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:02.789469 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:03.289287 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:03.789681 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:04.289492 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:04.789664 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:05.290394 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:05.789853 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:06.289152 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:06.789934 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:07.289759 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:07.789801 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:08.291132 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:08.789547 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:09.289169 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:09.790603 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:10.289859 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:10.789524 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:11.289727 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:11.789583 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:12.290722 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:12.800053 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:13.292159 8307 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0920 16:46:13.795164 8307 kapi.go:107] duration metric: took 1m12.010544183s to wait for app.kubernetes.io/name=ingress-nginx ...
I0920 16:46:28.521701 8307 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0920 16:46:28.521730 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:28.995622 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:29.495711 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:29.995194 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:30.495567 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:30.994516 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:31.494446 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:31.994999 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:32.495588 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:32.995271 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:33.494729 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:33.995243 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:34.496260 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:34.995218 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:35.494477 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:35.995316 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:36.495115 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:36.994832 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:37.495291 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:37.995028 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:38.494895 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:38.994368 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:39.495699 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:39.994753 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:40.494802 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:40.994721 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:41.494599 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:41.995283 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:42.495423 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:42.994641 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:43.495805 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:43.994426 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:44.495685 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:44.994965 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:45.494496 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:45.995439 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:46.494491 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:46.995590 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:47.494634 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:47.995058 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:48.494831 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:48.995547 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:49.495746 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:49.994994 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:50.494611 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:50.994892 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:51.494373 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:51.995399 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:52.495612 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:52.995912 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:53.495248 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:53.995302 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:54.495786 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:54.994960 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:55.495109 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:55.994763 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:56.495845 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:56.994276 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:57.494716 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:57.995610 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:58.495383 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:58.994443 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:59.495497 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:46:59.995490 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:00.496184 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:00.994511 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:01.494634 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:01.994436 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:02.495505 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:02.994991 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:03.494658 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:03.995479 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:04.495394 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:04.996029 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:05.494952 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:05.994243 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:06.498296 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:06.994913 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:07.494286 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:07.994963 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:08.494943 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:08.995120 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:09.501265 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:09.995651 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:10.495830 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:10.994761 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:11.494600 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:11.996858 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:12.494610 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:12.994983 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:13.494874 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:13.994474 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:14.521495 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:14.996596 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:15.495270 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:15.994943 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:16.495495 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:16.995512 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:17.495131 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:17.994654 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:18.495210 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:18.994431 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:19.494853 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:19.994705 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:20.495397 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:20.995007 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:21.494273 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:21.995551 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:22.495639 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:22.995117 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:23.494968 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:23.994412 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:24.495400 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:24.996076 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:25.495160 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:25.995332 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:26.495542 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:26.994881 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:27.494869 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:27.995157 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:28.495544 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:28.995281 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:29.495613 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:29.997746 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:30.495592 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:30.995359 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:31.494863 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:31.994213 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:32.495389 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:32.995069 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:33.494760 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:33.994472 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:34.494923 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:34.996013 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:35.494603 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:35.995760 8307 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0920 16:47:36.494742 8307 kapi.go:107] duration metric: took 2m30.003509007s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0920 16:47:36.496805 8307 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-877987 cluster.
I0920 16:47:36.499379 8307 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0920 16:47:36.501607 8307 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0920 16:47:36.503581 8307 out.go:177] * Enabled addons: nvidia-device-plugin, volcano, cloud-spanner, storage-provisioner, ingress-dns, metrics-server, inspektor-gadget, yakd, storage-provisioner-rancher, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0920 16:47:36.505559 8307 addons.go:510] duration metric: took 2m45.269176298s for enable addons: enabled=[nvidia-device-plugin volcano cloud-spanner storage-provisioner ingress-dns metrics-server inspektor-gadget yakd storage-provisioner-rancher volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0920 16:47:36.505610 8307 start.go:246] waiting for cluster config update ...
I0920 16:47:36.505633 8307 start.go:255] writing updated cluster config ...
I0920 16:47:36.505927 8307 ssh_runner.go:195] Run: rm -f paused
I0920 16:47:36.839434 8307 start.go:600] kubectl: 1.31.1, cluster: 1.31.1 (minor skew: 0)
I0920 16:47:36.842034 8307 out.go:177] * Done! kubectl is now configured to use "addons-877987" cluster and "default" namespace by default
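The gcp-auth messages above note that per-pod credential mounting can be skipped by adding a label with the gcp-auth-skip-secret key. As a minimal sketch (the pod name and the "true" label value are illustrative assumptions, not taken from this log), such a pod could be created with:

  # the label opts the pod out of credential injection; the rest is a plain busybox pod
  kubectl --context addons-877987 run skip-gcp-creds-demo \
    --image=gcr.io/k8s-minikube/busybox \
    --labels=gcp-auth-skip-secret=true \
    --restart=Never --command -- sleep 3600

Pods created without that label, like the busybox pod described later in this log, get the /google-app-creds.json mount and the GOOGLE_APPLICATION_CREDENTIALS environment injected automatically.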
==> Docker <==
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.111121595Z" level=info msg="ignoring event" container=2d4f3ecd371d587526f3619e531f1a32114deadae6034f63bb345c7dd6517b2a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.149801357Z" level=info msg="ignoring event" container=86e93463680c00e19a959848d9e7bec0d19f26e8c2d3372dd1bf81b7792d16d4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.155694548Z" level=info msg="ignoring event" container=2c4474d96214a5b9812925e3c930cb6d2f1b1f19ed2042b5ad945fa9b0dfd3da module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.169114601Z" level=info msg="ignoring event" container=ff5151dfd70f983e9d2d4d5115c33368154fb3649ab974856b5887a1148c710d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.193442768Z" level=info msg="ignoring event" container=1f525cbc21478b035c7f450f9e1ce317d844ca65635ac2ba67a1f327350fa30e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.199913987Z" level=info msg="ignoring event" container=9aed4f1cb2be20360843bae6227424dbce9c5c631a431ffae5e0396459e5620f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.199971472Z" level=info msg="ignoring event" container=1d98b1744270b7bfe8de1baea9efcf98c0ce9c4481fff1502c2f9c022f68aa10 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.207970353Z" level=info msg="ignoring event" container=d18b4e831ba0fc70f1b90c33002bba9688b79dda2c02cad9b4e85a2ba461d33d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.452107911Z" level=info msg="ignoring event" container=9121123dcdf4f35ea5aa3e6dcc2530fc3f28f1d371793a55aacfbc7247f24d82 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.505675704Z" level=info msg="Attempting next endpoint for pull after error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" spanID=f0000207d445f43f traceID=bb3707071cea39e7303770ec4522f193
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.510694247Z" level=error msg="Handler for POST /v1.43/images/create returned error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed" spanID=f0000207d445f43f traceID=bb3707071cea39e7303770ec4522f193
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.545389342Z" level=info msg="ignoring event" container=ac933261728a1569c1c0106e720235063700481d628289cceb7f66106bff06e3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:12 addons-877987 dockerd[1288]: time="2024-09-20T16:57:12.571836771Z" level=info msg="ignoring event" container=1bacd7f5bd1752ce9f56dc6910476d9a80c4a0dc4fb9c9ce551b1e5029e4bc90 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:18 addons-877987 dockerd[1288]: time="2024-09-20T16:57:18.671478917Z" level=info msg="ignoring event" container=9d613eb796bbcb7b61b59874825356ef24b431b9a5b945fef2cec6fcc7b18509 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:18 addons-877987 dockerd[1288]: time="2024-09-20T16:57:18.712821162Z" level=info msg="ignoring event" container=f73972937ebe67bdeefc2f21b28deb3b0f465bfa541b914dc71dadbbce29b802 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:18 addons-877987 dockerd[1288]: time="2024-09-20T16:57:18.873393785Z" level=info msg="ignoring event" container=69f1d9f0d4382cb7e93b54914b09b269a1a99b2515602ac20481ff96a082117d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:18 addons-877987 dockerd[1288]: time="2024-09-20T16:57:18.902734968Z" level=info msg="ignoring event" container=697285cfc8923bd46e879757680ec468321a1a78978e68fac4ed0f60647f7ef6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:26 addons-877987 dockerd[1288]: time="2024-09-20T16:57:26.419758791Z" level=info msg="ignoring event" container=e5a533500f4b94d142b194c09f37a7e7ee5eddf5c8a0a5a63f52346b55d1f87c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:26 addons-877987 dockerd[1288]: time="2024-09-20T16:57:26.534881761Z" level=info msg="ignoring event" container=c6032b475699b01866b1b92c0231890d6c545a30cfbe8e2ba2c78abcdf7c07f3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:32 addons-877987 dockerd[1288]: time="2024-09-20T16:57:32.079582250Z" level=info msg="ignoring event" container=eb665ae20be3901fd4b10b49632c023ce24dd63f7d5beaddb5024f38b08e3b84 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:32 addons-877987 dockerd[1288]: time="2024-09-20T16:57:32.841567608Z" level=info msg="ignoring event" container=ed7fe1013f05d10aa9e47b9c65f635615a00278ddb6fdfa102d21bfcdf106e16 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:33 addons-877987 dockerd[1288]: time="2024-09-20T16:57:33.534250589Z" level=info msg="ignoring event" container=cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:33 addons-877987 dockerd[1288]: time="2024-09-20T16:57:33.634116011Z" level=info msg="ignoring event" container=7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:33 addons-877987 dockerd[1288]: time="2024-09-20T16:57:33.767967380Z" level=info msg="ignoring event" container=e52d0c93971f9d9663084ad2d71aaa4296eb50cb683d75e854e5571d0d34477e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 20 16:57:33 addons-877987 dockerd[1288]: time="2024-09-20T16:57:33.888660280Z" level=info msg="ignoring event" container=8e196733bf750c0ed6fc65450d08f3df59435b210f4362801ab955c9498a1b2d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
3f2756d32e2a1 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 2dcb03eed51c4 gcp-auth-89d5ffd79-w7ggl
b96a36f62143c registry.k8s.io/ingress-nginx/controller@sha256:d5f8217feeac4887cb1ed21f27c2674e58be06bd8f5184cacea2a69abaf78dce 11 minutes ago Running controller 0 5266c9fb66cfe ingress-nginx-controller-bc57996ff-vh9tp
fdecd440fe26f registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited patch 0 77894f5e64ded ingress-nginx-admission-patch-wrlgh
063206699a14b registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 47581dbbca3a9 ingress-nginx-admission-create-bgvbz
9bf3b4102ceb7 marcnuri/yakd@sha256:c5414196116a2266ad097b0468833b73ef1d6c7922241115fe203fb826381624 12 minutes ago Running yakd 0 002af133052de yakd-dashboard-67d98fc6b-k78vc
efc1160507a15 rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246 12 minutes ago Running local-path-provisioner 0 2a9c76b446190 local-path-provisioner-86d989889c-4cqvt
9df7b2d11d52d gcr.io/k8s-minikube/minikube-ingress-dns@sha256:4211a1de532376c881851542238121b26792225faa36a7b02dccad88fd05797c 12 minutes ago Running minikube-ingress-dns 0 0bef5d5859fbc kube-ingress-dns-minikube
ec5f517850414 gcr.io/cloud-spanner-emulator/emulator@sha256:636fdfc528824bae5f0ea2eca6ae307fe81092f05ec21038008bc0d6100e52fc 12 minutes ago Running cloud-spanner-emulator 0 d961f1d35750d cloud-spanner-emulator-769b77f747-9sj7c
5ca8750f219f6 nvcr.io/nvidia/k8s-device-plugin@sha256:ed39e22c8b71343fb996737741a99da88ce6c75dd83b5c520e0b3d8e8a884c47 12 minutes ago Running nvidia-device-plugin-ctr 0 09e31ac874e26 nvidia-device-plugin-daemonset-wrczs
30d1039837d31 ba04bb24b9575 12 minutes ago Running storage-provisioner 0 adb7188809498 storage-provisioner
4d29b087056f2 2f6c962e7b831 12 minutes ago Running coredns 0 eb937a1327c9b coredns-7c65d6cfc9-v86mg
b2517fcb15811 24a140c548c07 12 minutes ago Running kube-proxy 0 1066775f0776f kube-proxy-hxdck
c74d0c39b85d1 d3f53a98c0a9d 12 minutes ago Running kube-apiserver 0 8620ad9d9d533 kube-apiserver-addons-877987
8e3c2eb108002 27e3830e14027 12 minutes ago Running etcd 0 d0b09bedccefa etcd-addons-877987
36348c169e3a8 279f381cb3736 12 minutes ago Running kube-controller-manager 0 6c4dad70501bb kube-controller-manager-addons-877987
f3d7f7ae712af 7f8aa378bb47d 12 minutes ago Running kube-scheduler 0 1f68acf3a5676 kube-scheduler-addons-877987
==> controller_ingress [b96a36f62143] <==
NGINX Ingress controller
Release: v1.11.2
Build: 46e76e5916813cfca2a9b0bfdc34b69a0000f6b9
Repository: https://github.com/kubernetes/ingress-nginx
nginx version: nginx/1.25.5
-------------------------------------------------------------------------------
I0920 16:46:12.789989 7 main.go:248] "Running in Kubernetes cluster" major="1" minor="31" git="v1.31.1" state="clean" commit="948afe5ca072329a73c8e79ed5938717a5cb3d21" platform="linux/arm64"
I0920 16:46:13.168078 7 main.go:101] "SSL fake certificate created" file="/etc/ingress-controller/ssl/default-fake-certificate.pem"
I0920 16:46:13.188429 7 ssl.go:535] "loading tls certificate" path="/usr/local/certificates/cert" key="/usr/local/certificates/key"
I0920 16:46:13.199743 7 nginx.go:271] "Starting NGINX Ingress controller"
I0920 16:46:13.221833 7 event.go:377] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"ingress-nginx-controller", UID:"6de0e728-5a46-4b37-85fb-df1ed2391769", APIVersion:"v1", ResourceVersion:"703", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/ingress-nginx-controller
I0920 16:46:13.230116 7 event.go:377] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"tcp-services", UID:"46d6e11b-1b13-4272-b46e-9f47a3d8cdac", APIVersion:"v1", ResourceVersion:"706", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/tcp-services
I0920 16:46:13.230433 7 event.go:377] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"udp-services", UID:"6ae4715f-6667-47b4-9cae-a714cb9916d4", APIVersion:"v1", ResourceVersion:"707", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services
I0920 16:46:14.402556 7 nginx.go:317] "Starting NGINX process"
I0920 16:46:14.402832 7 leaderelection.go:250] attempting to acquire leader lease ingress-nginx/ingress-nginx-leader...
I0920 16:46:14.403007 7 nginx.go:337] "Starting validation webhook" address=":8443" certPath="/usr/local/certificates/cert" keyPath="/usr/local/certificates/key"
I0920 16:46:14.403367 7 controller.go:193] "Configuration changes detected, backend reload required"
I0920 16:46:14.413397 7 leaderelection.go:260] successfully acquired lease ingress-nginx/ingress-nginx-leader
I0920 16:46:14.414075 7 status.go:85] "New leader elected" identity="ingress-nginx-controller-bc57996ff-vh9tp"
I0920 16:46:14.427134 7 status.go:219] "POD is not ready" pod="ingress-nginx/ingress-nginx-controller-bc57996ff-vh9tp" node="addons-877987"
I0920 16:46:14.447737 7 controller.go:213] "Backend successfully reloaded"
I0920 16:46:14.447809 7 controller.go:224] "Initial sync, sleeping for 1 second"
I0920 16:46:14.447956 7 event.go:377] Event(v1.ObjectReference{Kind:"Pod", Namespace:"ingress-nginx", Name:"ingress-nginx-controller-bc57996ff-vh9tp", UID:"5b31605c-ae08-4755-8868-bd7183ac9d43", APIVersion:"v1", ResourceVersion:"735", FieldPath:""}): type: 'Normal' reason: 'RELOAD' NGINX reload triggered due to a change in configuration
==> coredns [4d29b087056f] <==
.:53
[INFO] plugin/reload: Running configuration SHA512 = 591cf328cccc12bc490481273e738df59329c62c0b729d94e8b61db9961c2fa5f046dd37f1cf888b953814040d180f52594972691cd6ff41be96639138a43908
CoreDNS-1.11.3
linux/arm64, go1.21.11, a6338e9
[INFO] Reloading
[INFO] plugin/reload: Running configuration SHA512 = 05e3eaddc414b2d71a69b2e2bc6f2681fc1f4d04bcdd3acc1a41457bb7db518208b95ddfc4c9fffedc59c25a8faf458be1af4915a4a3c0d6777cb7a346bc5d86
[INFO] Reloading complete
[INFO] 127.0.0.1:33396 - 15335 "HINFO IN 7048061253374173460.3195082509755045569. udp 57 false 512" NXDOMAIN qr,rd,ra 57 0.038209005s
[INFO] 10.244.0.25:54965 - 32540 "AAAA IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.000292119s
[INFO] 10.244.0.25:37161 - 43504 "A IN storage.googleapis.com.gcp-auth.svc.cluster.local. udp 78 false 1232" NXDOMAIN qr,aa,rd 160 0.00012683s
[INFO] 10.244.0.25:58619 - 37862 "AAAA IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000125706s
[INFO] 10.244.0.25:45279 - 17498 "A IN storage.googleapis.com.svc.cluster.local. udp 69 false 1232" NXDOMAIN qr,aa,rd 151 0.000102542s
[INFO] 10.244.0.25:40301 - 2204 "AAAA IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000137226s
[INFO] 10.244.0.25:57578 - 61190 "A IN storage.googleapis.com.cluster.local. udp 65 false 1232" NXDOMAIN qr,aa,rd 147 0.000105717s
[INFO] 10.244.0.25:59894 - 54813 "AAAA IN storage.googleapis.com.us-east-2.compute.internal. udp 78 false 1232" NXDOMAIN qr,rd,ra 67 0.002208615s
[INFO] 10.244.0.25:48563 - 46260 "A IN storage.googleapis.com.us-east-2.compute.internal. udp 78 false 1232" NXDOMAIN qr,rd,ra 67 0.002602159s
[INFO] 10.244.0.25:35868 - 48728 "AAAA IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 240 0.00297565s
[INFO] 10.244.0.25:45646 - 49671 "A IN storage.googleapis.com. udp 51 false 1232" NOERROR qr,rd,ra 648 0.003086011s
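The NXDOMAIN answers above are ordinary cluster DNS search-path expansion: the same external name (storage.googleapis.com) is tried with each search suffix (gcp-auth.svc.cluster.local, svc.cluster.local, cluster.local, us-east-2.compute.internal) before the bare name resolves with NOERROR. The first suffix indicates the querying pod sits in the gcp-auth namespace. One way to confirm a pod's search list, reusing the gcp-auth pod name from the container status above (output not reproduced here):

  kubectl --context addons-877987 -n gcp-auth exec gcp-auth-89d5ffd79-w7ggl -- cat /etc/resolv.conf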
==> describe nodes <==
Name: addons-877987
Roles: control-plane
Labels: beta.kubernetes.io/arch=arm64
beta.kubernetes.io/os=linux
kubernetes.io/arch=arm64
kubernetes.io/hostname=addons-877987
kubernetes.io/os=linux
minikube.k8s.io/commit=0626f22cf0d915d75e291a5bce701f94395056e1
minikube.k8s.io/name=addons-877987
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_20T16_44_46_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-877987
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Fri, 20 Sep 2024 16:44:43 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-877987
AcquireTime: <unset>
RenewTime: Fri, 20 Sep 2024 16:57:31 +0000
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
MemoryPressure False Fri, 20 Sep 2024 16:56:51 +0000 Fri, 20 Sep 2024 16:44:39 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Fri, 20 Sep 2024 16:56:51 +0000 Fri, 20 Sep 2024 16:44:39 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Fri, 20 Sep 2024 16:56:51 +0000 Fri, 20 Sep 2024 16:44:39 +0000 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Fri, 20 Sep 2024 16:56:51 +0000 Fri, 20 Sep 2024 16:44:43 +0000 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-877987
Capacity:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 203034800Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
hugepages-32Mi: 0
hugepages-64Ki: 0
memory: 8022296Ki
pods: 110
System Info:
Machine ID: aa8de342cbf64be7abfbf8f5433a0ce7
System UUID: 12ec6b84-be42-4491-bd8d-cc388ab37e23
Boot ID: cfeac633-1b4b-4878-a7d1-bdd76da68a0f
Kernel Version: 5.15.0-1070-aws
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: arm64
Container Runtime Version: docker://27.3.0
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (15 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
default busybox 0 (0%) 0 (0%) 0 (0%) 0 (0%) 9m16s
default cloud-spanner-emulator-769b77f747-9sj7c 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
gcp-auth gcp-auth-89d5ffd79-w7ggl 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11m
ingress-nginx ingress-nginx-controller-bc57996ff-vh9tp 100m (5%) 0 (0%) 90Mi (1%) 0 (0%) 12m
kube-system coredns-7c65d6cfc9-v86mg 100m (5%) 0 (0%) 70Mi (0%) 170Mi (2%) 12m
kube-system etcd-addons-877987 100m (5%) 0 (0%) 100Mi (1%) 0 (0%) 12m
kube-system kube-apiserver-addons-877987 250m (12%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-controller-manager-addons-877987 200m (10%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-ingress-dns-minikube 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-proxy-hxdck 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system kube-scheduler-addons-877987 100m (5%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system nvidia-device-plugin-daemonset-wrczs 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
kube-system storage-provisioner 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
local-path-storage local-path-provisioner-86d989889c-4cqvt 0 (0%) 0 (0%) 0 (0%) 0 (0%) 12m
yakd-dashboard yakd-dashboard-67d98fc6b-k78vc 0 (0%) 0 (0%) 128Mi (1%) 256Mi (3%) 12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 850m (42%) 0 (0%)
memory 388Mi (4%) 426Mi (5%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-1Gi 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
hugepages-32Mi 0 (0%) 0 (0%)
hugepages-64Ki 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 12m kube-proxy
Normal NodeAllocatableEnforced 12m kubelet Updated Node Allocatable limit across pods
Warning CgroupV1 12m kubelet Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal NodeHasSufficientMemory 12m (x8 over 12m) kubelet Node addons-877987 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 12m (x7 over 12m) kubelet Node addons-877987 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 12m (x7 over 12m) kubelet Node addons-877987 status is now: NodeHasSufficientPID
Normal Starting 12m kubelet Starting kubelet.
Normal Starting 12m kubelet Starting kubelet.
Warning CgroupV1 12m kubelet Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal NodeAllocatableEnforced 12m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 12m kubelet Node addons-877987 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 12m kubelet Node addons-877987 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 12m kubelet Node addons-877987 status is now: NodeHasSufficientPID
Normal RegisteredNode 12m node-controller Node addons-877987 event: Registered Node addons-877987 in Controller
==> dmesg <==
[Sep20 16:17] ACPI: SRAT not present
[ +0.000000] ACPI: SRAT not present
[ +0.000000] SPI driver altr_a10sr has no spi_device_id for altr,a10sr
[ +0.014742] device-mapper: core: CONFIG_IMA_DISABLE_HTABLE is disabled. Duplicate IMA measurements will not be recorded in the IMA log.
[ +0.507055] systemd[1]: Configuration file /run/systemd/system/netplan-ovs-cleanup.service is marked world-inaccessible. This has no effect as configuration data is accessible via APIs without restrictions. Proceeding anyway.
[ +0.803986] ena 0000:00:05.0: LLQ is not supported Fallback to host mode policy.
[ +6.089572] kauditd_printk_skb: 36 callbacks suppressed
==> etcd [8e3c2eb10800] <==
{"level":"info","ts":"2024-09-20T16:44:39.222385Z","caller":"embed/etcd.go:571","msg":"cmux::serve","address":"192.168.49.2:2380"}
{"level":"info","ts":"2024-09-20T16:44:39.217615Z","caller":"embed/etcd.go:870","msg":"serving metrics","address":"http://127.0.0.1:2381"}
{"level":"info","ts":"2024-09-20T16:44:39.274390Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc is starting a new election at term 1"}
{"level":"info","ts":"2024-09-20T16:44:39.274436Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-09-20T16:44:39.274460Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-09-20T16:44:39.274481Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-09-20T16:44:39.274487Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-20T16:44:39.274498Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-20T16:44:39.274505Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-20T16:44:39.281355Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-20T16:44:39.286547Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-877987 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-20T16:44:39.286577Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-20T16:44:39.294411Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-20T16:44:39.294511Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-20T16:44:39.294535Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-20T16:44:39.294419Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-20T16:44:39.295061Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-20T16:44:39.295233Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-20T16:44:39.295997Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-20T16:44:39.296133Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-09-20T16:44:39.302440Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-20T16:44:39.302472Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-20T16:54:40.959788Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1879}
{"level":"info","ts":"2024-09-20T16:54:41.008627Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1879,"took":"47.761985ms","hash":1115996737,"current-db-size-bytes":8843264,"current-db-size":"8.8 MB","current-db-size-in-use-bytes":4960256,"current-db-size-in-use":"5.0 MB"}
{"level":"info","ts":"2024-09-20T16:54:41.008684Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":1115996737,"revision":1879,"compact-revision":-1}
==> gcp-auth [3f2756d32e2a] <==
2024/09/20 16:47:36 GCP Auth Webhook started!
2024/09/20 16:47:54 Ready to marshal response ...
2024/09/20 16:47:54 Ready to write response ...
2024/09/20 16:47:54 Ready to marshal response ...
2024/09/20 16:47:54 Ready to write response ...
2024/09/20 16:48:18 Ready to marshal response ...
2024/09/20 16:48:18 Ready to write response ...
2024/09/20 16:48:18 Ready to marshal response ...
2024/09/20 16:48:18 Ready to write response ...
2024/09/20 16:48:18 Ready to marshal response ...
2024/09/20 16:48:18 Ready to write response ...
2024/09/20 16:56:22 Ready to marshal response ...
2024/09/20 16:56:22 Ready to write response ...
2024/09/20 16:56:22 Ready to marshal response ...
2024/09/20 16:56:22 Ready to write response ...
2024/09/20 16:56:22 Ready to marshal response ...
2024/09/20 16:56:22 Ready to write response ...
2024/09/20 16:56:32 Ready to marshal response ...
2024/09/20 16:56:32 Ready to write response ...
2024/09/20 16:56:41 Ready to marshal response ...
2024/09/20 16:56:41 Ready to write response ...
2024/09/20 16:57:02 Ready to marshal response ...
2024/09/20 16:57:02 Ready to write response ...
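Each "Ready to marshal response ... Ready to write response" pair above corresponds to one admission request handled by the gcp-auth webhook as pods are created. The webhook registration itself can be listed without assuming its name:

  kubectl --context addons-877987 get mutatingwebhookconfigurations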
==> kernel <==
16:57:35 up 40 min, 0 users, load average: 0.33, 0.49, 0.47
Linux addons-877987 5.15.0-1070-aws #76~20.04.1-Ubuntu SMP Mon Sep 2 12:20:48 UTC 2024 aarch64 aarch64 aarch64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [c74d0c39b85d] <==
I0920 16:48:09.488053 1 handler.go:286] Adding GroupVersion flow.volcano.sh v1alpha1 to ResourceManager
W0920 16:48:09.850804 1 cacher.go:171] Terminating all watchers from cacher podgroups.scheduling.volcano.sh
W0920 16:48:10.033100 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0920 16:48:10.132268 1 cacher.go:171] Terminating all watchers from cacher numatopologies.nodeinfo.volcano.sh
W0920 16:48:10.198461 1 cacher.go:171] Terminating all watchers from cacher queues.scheduling.volcano.sh
W0920 16:48:10.488313 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0920 16:48:10.736565 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0920 16:56:22.219183 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.109.65.184"}
I0920 16:56:48.574728 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
E0920 16:56:50.475196 1 watch.go:250] "Unhandled Error" err="http2: stream closed" logger="UnhandledError"
I0920 16:57:18.386166 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0920 16:57:18.386219 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0920 16:57:18.405670 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0920 16:57:18.405960 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0920 16:57:18.455161 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0920 16:57:18.455219 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0920 16:57:18.465024 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0920 16:57:18.465877 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0920 16:57:18.537288 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0920 16:57:18.537366 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0920 16:57:19.465709 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0920 16:57:19.544320 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
W0920 16:57:19.562657 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
I0920 16:57:31.993584 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0920 16:57:33.020308 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
==> kube-controller-manager [36348c169e3a] <==
I0920 16:57:20.965778 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0920 16:57:20.965830 1 shared_informer.go:320] Caches are synced for garbage collector
W0920 16:57:21.051379 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:21.051438 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:21.060453 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:21.060501 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:22.891335 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:22.891377 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:23.656604 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:23.656647 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:23.870896 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:23.870942 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0920 16:57:25.317079 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/metrics-server-84c5f94fbc" duration="4.701µs"
W0920 16:57:25.528042 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:25.528086 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:27.340374 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:27.340421 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:27.377899 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:27.377942 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0920 16:57:29.279972 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:29.280031 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
E0920 16:57:33.022072 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0920 16:57:33.456567 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="4.184µs"
W0920 16:57:34.140435 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0920 16:57:34.140481 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
==> kube-proxy [b2517fcb1581] <==
I0920 16:44:52.297204 1 server_linux.go:66] "Using iptables proxy"
I0920 16:44:52.405215 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0920 16:44:52.405274 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0920 16:44:52.463833 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0920 16:44:52.464015 1 server_linux.go:169] "Using iptables Proxier"
I0920 16:44:52.466243 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0920 16:44:52.466637 1 server.go:483] "Version info" version="v1.31.1"
I0920 16:44:52.466653 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0920 16:44:52.486341 1 config.go:199] "Starting service config controller"
I0920 16:44:52.486373 1 shared_informer.go:313] Waiting for caches to sync for service config
I0920 16:44:52.486399 1 config.go:105] "Starting endpoint slice config controller"
I0920 16:44:52.486403 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0920 16:44:52.487920 1 config.go:328] "Starting node config controller"
I0920 16:44:52.487933 1 shared_informer.go:313] Waiting for caches to sync for node config
I0920 16:44:52.587459 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0920 16:44:52.587515 1 shared_informer.go:320] Caches are synced for service config
I0920 16:44:52.588316 1 shared_informer.go:320] Caches are synced for node config
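The "Kube-proxy configuration may be incomplete or incorrect" line above is only advisory, and the message itself suggests --nodeport-addresses primary as the fix. In a kubeadm-provisioned cluster such as this one, the effective KubeProxyConfiguration normally lives in the kube-proxy ConfigMap; assuming the kubeadm default name, it should be viewable with:

  kubectl --context addons-877987 -n kube-system get configmap kube-proxy -o yaml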
==> kube-scheduler [f3d7f7ae712a] <==
W0920 16:44:43.893201 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0920 16:44:43.893246 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.894208 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0920 16:44:43.894245 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.894530 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0920 16:44:43.894558 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.894635 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0920 16:44:43.894651 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.894722 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope
E0920 16:44:43.894748 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.894810 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope
E0920 16:44:43.894838 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.894909 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0920 16:44:43.894930 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.895005 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0920 16:44:43.895024 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.895101 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0920 16:44:43.895119 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.895183 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0920 16:44:43.895198 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0920 16:44:43.895407 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0920 16:44:43.895430 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0920 16:44:44.763941 1 reflector.go:561] runtime/asm_arm64.s:1222: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0920 16:44:44.763992 1 reflector.go:158] "Unhandled Error" err="runtime/asm_arm64.s:1222: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
I0920 16:44:46.382684 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.423317 2348 reconciler_common.go:288] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0e0e8336-c68e-4cca-9118-71a3bca51144-host\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.423328 2348 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-sjpgv\" (UniqueName: \"kubernetes.io/projected/0e0e8336-c68e-4cca-9118-71a3bca51144-kube-api-access-sjpgv\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.423337 2348 reconciler_common.go:288] "Volume detached for volume \"debugfs\" (UniqueName: \"kubernetes.io/host-path/0e0e8336-c68e-4cca-9118-71a3bca51144-debugfs\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.935540 2348 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rwxg\" (UniqueName: \"kubernetes.io/projected/b3be4a97-bce2-4ea1-b338-c56f1b373bfc-kube-api-access-9rwxg\") pod \"b3be4a97-bce2-4ea1-b338-c56f1b373bfc\" (UID: \"b3be4a97-bce2-4ea1-b338-c56f1b373bfc\") "
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.936048 2348 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/b3be4a97-bce2-4ea1-b338-c56f1b373bfc-gcp-creds\") pod \"b3be4a97-bce2-4ea1-b338-c56f1b373bfc\" (UID: \"b3be4a97-bce2-4ea1-b338-c56f1b373bfc\") "
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.936295 2348 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b3be4a97-bce2-4ea1-b338-c56f1b373bfc-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "b3be4a97-bce2-4ea1-b338-c56f1b373bfc" (UID: "b3be4a97-bce2-4ea1-b338-c56f1b373bfc"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 20 16:57:32 addons-877987 kubelet[2348]: I0920 16:57:32.938232 2348 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3be4a97-bce2-4ea1-b338-c56f1b373bfc-kube-api-access-9rwxg" (OuterVolumeSpecName: "kube-api-access-9rwxg") pod "b3be4a97-bce2-4ea1-b338-c56f1b373bfc" (UID: "b3be4a97-bce2-4ea1-b338-c56f1b373bfc"). InnerVolumeSpecName "kube-api-access-9rwxg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 20 16:57:33 addons-877987 kubelet[2348]: I0920 16:57:33.036891 2348 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-9rwxg\" (UniqueName: \"kubernetes.io/projected/b3be4a97-bce2-4ea1-b338-c56f1b373bfc-kube-api-access-9rwxg\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:33 addons-877987 kubelet[2348]: I0920 16:57:33.036935 2348 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/b3be4a97-bce2-4ea1-b338-c56f1b373bfc-gcp-creds\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:33 addons-877987 kubelet[2348]: I0920 16:57:33.946716 2348 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txtk8\" (UniqueName: \"kubernetes.io/projected/2a3a6aaa-b147-4517-bdc2-529c58ed2d26-kube-api-access-txtk8\") pod \"2a3a6aaa-b147-4517-bdc2-529c58ed2d26\" (UID: \"2a3a6aaa-b147-4517-bdc2-529c58ed2d26\") "
Sep 20 16:57:33 addons-877987 kubelet[2348]: I0920 16:57:33.952378 2348 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a3a6aaa-b147-4517-bdc2-529c58ed2d26-kube-api-access-txtk8" (OuterVolumeSpecName: "kube-api-access-txtk8") pod "2a3a6aaa-b147-4517-bdc2-529c58ed2d26" (UID: "2a3a6aaa-b147-4517-bdc2-529c58ed2d26"). InnerVolumeSpecName "kube-api-access-txtk8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.048775 2348 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fpp2\" (UniqueName: \"kubernetes.io/projected/c8ba5e64-c35c-4fdb-8dfb-ede028619b44-kube-api-access-5fpp2\") pod \"c8ba5e64-c35c-4fdb-8dfb-ede028619b44\" (UID: \"c8ba5e64-c35c-4fdb-8dfb-ede028619b44\") "
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.048912 2348 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-txtk8\" (UniqueName: \"kubernetes.io/projected/2a3a6aaa-b147-4517-bdc2-529c58ed2d26-kube-api-access-txtk8\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.059497 2348 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8ba5e64-c35c-4fdb-8dfb-ede028619b44-kube-api-access-5fpp2" (OuterVolumeSpecName: "kube-api-access-5fpp2") pod "c8ba5e64-c35c-4fdb-8dfb-ede028619b44" (UID: "c8ba5e64-c35c-4fdb-8dfb-ede028619b44"). InnerVolumeSpecName "kube-api-access-5fpp2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.149736 2348 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-5fpp2\" (UniqueName: \"kubernetes.io/projected/c8ba5e64-c35c-4fdb-8dfb-ede028619b44-kube-api-access-5fpp2\") on node \"addons-877987\" DevicePath \"\""
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.243153 2348 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e0e8336-c68e-4cca-9118-71a3bca51144" path="/var/lib/kubelet/pods/0e0e8336-c68e-4cca-9118-71a3bca51144/volumes"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.243620 2348 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3be4a97-bce2-4ea1-b338-c56f1b373bfc" path="/var/lib/kubelet/pods/b3be4a97-bce2-4ea1-b338-c56f1b373bfc/volumes"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.379627 2348 scope.go:117] "RemoveContainer" containerID="cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.432611 2348 scope.go:117] "RemoveContainer" containerID="cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d"
Sep 20 16:57:34 addons-877987 kubelet[2348]: E0920 16:57:34.433548 2348 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d" containerID="cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.433586 2348 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d"} err="failed to get container status \"cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d\": rpc error: code = Unknown desc = Error response from daemon: No such container: cd436dd64d8c0b14ac2aa4cd8cf47938781b4b3ee5399864fa282c6d660dbb9d"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.433613 2348 scope.go:117] "RemoveContainer" containerID="7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.450228 2348 scope.go:117] "RemoveContainer" containerID="7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254"
Sep 20 16:57:34 addons-877987 kubelet[2348]: E0920 16:57:34.451252 2348 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254" containerID="7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254"
Sep 20 16:57:34 addons-877987 kubelet[2348]: I0920 16:57:34.451291 2348 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254"} err="failed to get container status \"7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254\": rpc error: code = Unknown desc = Error response from daemon: No such container: 7d34777f741bc88a0329ff2c8195d06bc1021dfd80fc5906b368c3165581b254"
==> storage-provisioner [30d1039837d3] <==
I0920 16:44:59.144596 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0920 16:44:59.190855 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0920 16:44:59.190963 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0920 16:44:59.242703 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0920 16:44:59.242900 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-877987_e0f328b7-f5c1-4dec-879b-c5605a42c985!
I0920 16:44:59.242974 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"0de4a182-a193-4042-aa21-d5008f5727b2", APIVersion:"v1", ResourceVersion:"631", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-877987_e0f328b7-f5c1-4dec-879b-c5605a42c985 became leader
I0920 16:44:59.362269 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-877987_e0f328b7-f5c1-4dec-879b-c5605a42c985!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-arm64 status --format={{.APIServer}} -p addons-877987 -n addons-877987
helpers_test.go:261: (dbg) Run: kubectl --context addons-877987 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox ingress-nginx-admission-create-bgvbz ingress-nginx-admission-patch-wrlgh
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-877987 describe pod busybox ingress-nginx-admission-create-bgvbz ingress-nginx-admission-patch-wrlgh
helpers_test.go:277: (dbg) Non-zero exit: kubectl --context addons-877987 describe pod busybox ingress-nginx-admission-create-bgvbz ingress-nginx-admission-patch-wrlgh: exit status 1 (94.11342ms)
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Service Account: default
Node: addons-877987/192.168.49.2
Start Time: Fri, 20 Sep 2024 16:48:18 +0000
Labels: integration-test=busybox
Annotations: <none>
Status: Pending
IP: 10.244.0.27
IPs:
IP: 10.244.0.27
Containers:
busybox:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
Image ID:
Port: <none>
Host Port: <none>
Command:
sleep
3600
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment:
GOOGLE_APPLICATION_CREDENTIALS: /google-app-creds.json
PROJECT_ID: this_is_fake
GCP_PROJECT: this_is_fake
GCLOUD_PROJECT: this_is_fake
GOOGLE_CLOUD_PROJECT: this_is_fake
CLOUDSDK_CORE_PROJECT: this_is_fake
Mounts:
/google-app-creds.json from gcp-creds (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-nqr9d (ro)
Conditions:
Type Status
PodReadyToStartContainers True
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kube-api-access-nqr9d:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
gcp-creds:
Type: HostPath (bare host directory volume)
Path: /var/lib/minikube/google_application_credentials.json
HostPathType: File
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 9m17s default-scheduler Successfully assigned default/busybox to addons-877987
Normal Pulling 7m50s (x4 over 9m17s) kubelet Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
Warning Failed 7m50s (x4 over 9m16s) kubelet Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
Warning Failed 7m50s (x4 over 9m16s) kubelet Error: ErrImagePull
Warning Failed 7m36s (x6 over 9m16s) kubelet Error: ImagePullBackOff
Normal BackOff 4m11s (x21 over 9m16s) kubelet Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
** stderr **
Error from server (NotFound): pods "ingress-nginx-admission-create-bgvbz" not found
Error from server (NotFound): pods "ingress-nginx-admission-patch-wrlgh" not found
** /stderr **
helpers_test.go:279: kubectl --context addons-877987 describe pod busybox ingress-nginx-admission-create-bgvbz ingress-nginx-admission-patch-wrlgh: exit status 1
--- FAIL: TestAddons/parallel/Registry (74.47s)
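Both the dockerd log at 16:57:12 and the busybox pod events above show the same failure mode: anonymous pulls from gcr.io are rejected with "unauthorized: authentication failed". A hedged way to reproduce the pull directly against the node's docker daemon, bypassing kubelet (runtime and image tag taken from the node info and events above):

  out/minikube-linux-arm64 -p addons-877987 ssh "docker pull gcr.io/k8s-minikube/busybox:1.28.4-glibc"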