=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:332: registry stabilized in 1.851172ms
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-9jnkd" [fe7b5898-ffa3-4b87-b941-4220b03798f0] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.003386929s
addons_test.go:337: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-d2r6l" [5b832366-d825-43bd-8e89-8b85bdb2fda4] Running
addons_test.go:337: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.00579755s
addons_test.go:342: (dbg) Run: kubectl --context addons-657043 delete po -l run=registry-test --now
addons_test.go:347: (dbg) Run: kubectl --context addons-657043 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:347: (dbg) Non-zero exit: kubectl --context addons-657043 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.090140008s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:349: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-657043 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:353: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:361: (dbg) Run: out/minikube-linux-amd64 -p addons-657043 ip
2024/09/14 16:57:50 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:390: (dbg) Run: out/minikube-linux-amd64 -p addons-657043 addons disable registry --alsologtostderr -v=1
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-657043
helpers_test.go:235: (dbg) docker inspect addons-657043:
-- stdout --
[
{
"Id": "d802592c5c25b6188b75680b1249ed995cdbc6d28841941a3864b5dd52562f35",
"Created": "2024-09-14T16:44:45.253776208Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 17590,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-14T16:44:45.385903155Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:88f675d47cf3251674dfc91344863c7a5ded6f8af97be97f42f48345581b0057",
"ResolvConfPath": "/var/lib/docker/containers/d802592c5c25b6188b75680b1249ed995cdbc6d28841941a3864b5dd52562f35/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/d802592c5c25b6188b75680b1249ed995cdbc6d28841941a3864b5dd52562f35/hostname",
"HostsPath": "/var/lib/docker/containers/d802592c5c25b6188b75680b1249ed995cdbc6d28841941a3864b5dd52562f35/hosts",
"LogPath": "/var/lib/docker/containers/d802592c5c25b6188b75680b1249ed995cdbc6d28841941a3864b5dd52562f35/d802592c5c25b6188b75680b1249ed995cdbc6d28841941a3864b5dd52562f35-json.log",
"Name": "/addons-657043",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-657043:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-657043",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/2dbb33e63b8b397dc91dced230f806435eb3c4a418226def71f97f0757b3c363-init/diff:/var/lib/docker/overlay2/551c643982b32555722b1d49ee2b6c3818a3252f49128cabbd8d56618ea9e041/diff",
"MergedDir": "/var/lib/docker/overlay2/2dbb33e63b8b397dc91dced230f806435eb3c4a418226def71f97f0757b3c363/merged",
"UpperDir": "/var/lib/docker/overlay2/2dbb33e63b8b397dc91dced230f806435eb3c4a418226def71f97f0757b3c363/diff",
"WorkDir": "/var/lib/docker/overlay2/2dbb33e63b8b397dc91dced230f806435eb3c4a418226def71f97f0757b3c363/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-657043",
"Source": "/var/lib/docker/volumes/addons-657043/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-657043",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-657043",
"name.minikube.sigs.k8s.io": "addons-657043",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "5008368f32abaff5c372fdc4565e71d1acb8d2f3654fd8a262593b2c2372ce5e",
"SandboxKey": "/var/run/docker/netns/5008368f32ab",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-657043": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "45c2511e25134fee8929d6dd83a96841aa03d91f8244f6f385e1cbb9ce557642",
"EndpointID": "4e2c255f7ae774700cd9d95b04872cc8154b3ca2e9a42f5d8f0e683dbac9f417",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-657043",
"d802592c5c25"
]
}
}
}
}
]
-- /stdout --
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-657043 -n addons-657043
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-657043 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | -p download-docker-333565 | download-docker-333565 | jenkins | v1.34.0 | 14 Sep 24 16:44 UTC | 14 Sep 24 16:44 UTC |
| start | --download-only -p | binary-mirror-501910 | jenkins | v1.34.0 | 14 Sep 24 16:44 UTC | |
| | binary-mirror-501910 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:36099 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-501910 | binary-mirror-501910 | jenkins | v1.34.0 | 14 Sep 24 16:44 UTC | 14 Sep 24 16:44 UTC |
| addons | enable dashboard -p | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:44 UTC | |
| | addons-657043 | | | | | |
| addons | disable dashboard -p | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:44 UTC | |
| | addons-657043 | | | | | |
| start | -p addons-657043 --wait=true | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:44 UTC | 14 Sep 24 16:47 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:48 UTC | 14 Sep 24 16:48 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:56 UTC | 14 Sep 24 16:56 UTC |
| | -p addons-657043 | | | | | |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:56 UTC | 14 Sep 24 16:56 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| ssh | addons-657043 ssh cat | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:56 UTC | 14 Sep 24 16:56 UTC |
| | /opt/local-path-provisioner/pvc-798265a8-9729-4359-ae8a-3dea675bc137_default_test-pvc/file1 | | | | | |
| addons | disable cloud-spanner -p | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:56 UTC | 14 Sep 24 16:56 UTC |
| | addons-657043 | | | | | |
| addons | enable headlamp | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:56 UTC | 14 Sep 24 16:56 UTC |
| | -p addons-657043 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:56 UTC | 14 Sep 24 16:56 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-657043 addons | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | addons-657043 | | | | | |
| ssh | addons-657043 ssh curl -s | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-657043 ip | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| addons | addons-657043 addons | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-657043 addons | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| ip | addons-657043 ip | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| addons | addons-657043 addons disable | addons-657043 | jenkins | v1.34.0 | 14 Sep 24 16:57 UTC | 14 Sep 24 16:57 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/14 16:44:24
Running on machine: ubuntu-20-agent
Binary: Built with gc go1.23.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0914 16:44:24.389022 16836 out.go:345] Setting OutFile to fd 1 ...
I0914 16:44:24.389144 16836 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0914 16:44:24.389152 16836 out.go:358] Setting ErrFile to fd 2...
I0914 16:44:24.389156 16836 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0914 16:44:24.389333 16836 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19643-8643/.minikube/bin
I0914 16:44:24.389899 16836 out.go:352] Setting JSON to false
I0914 16:44:24.391088 16836 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent","uptime":1608,"bootTime":1726330656,"procs":171,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1068-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0914 16:44:24.391196 16836 start.go:139] virtualization: kvm guest
I0914 16:44:24.393938 16836 out.go:177] * [addons-657043] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0914 16:44:24.395367 16836 notify.go:220] Checking for updates...
I0914 16:44:24.395412 16836 out.go:177] - MINIKUBE_LOCATION=19643
I0914 16:44:24.396774 16836 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0914 16:44:24.397992 16836 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19643-8643/kubeconfig
I0914 16:44:24.399192 16836 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19643-8643/.minikube
I0914 16:44:24.400377 16836 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0914 16:44:24.401784 16836 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0914 16:44:24.403378 16836 driver.go:394] Setting default libvirt URI to qemu:///system
I0914 16:44:24.427886 16836 docker.go:123] docker version: linux-27.2.1:Docker Engine - Community
I0914 16:44:24.427992 16836 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0914 16:44:24.474114 16836 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-14 16:44:24.465151039 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1068-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0914 16:44:24.474211 16836 docker.go:318] overlay module found
I0914 16:44:24.476116 16836 out.go:177] * Using the docker driver based on user configuration
I0914 16:44:24.477305 16836 start.go:297] selected driver: docker
I0914 16:44:24.477318 16836 start.go:901] validating driver "docker" against <nil>
I0914 16:44:24.477329 16836 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0914 16:44:24.478111 16836 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0914 16:44:24.527714 16836 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-14 16:44:24.519432713 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1068-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647939584 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0914 16:44:24.527866 16836 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0914 16:44:24.528127 16836 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0914 16:44:24.529792 16836 out.go:177] * Using Docker driver with root privileges
I0914 16:44:24.531260 16836 cni.go:84] Creating CNI manager for ""
I0914 16:44:24.531320 16836 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0914 16:44:24.531330 16836 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0914 16:44:24.531396 16836 start.go:340] cluster config:
{Name:addons-657043 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-657043 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0914 16:44:24.532767 16836 out.go:177] * Starting "addons-657043" primary control-plane node in "addons-657043" cluster
I0914 16:44:24.533889 16836 cache.go:121] Beginning downloading kic base image for docker with docker
I0914 16:44:24.534943 16836 out.go:177] * Pulling base image v0.0.45-1726281268-19643 ...
I0914 16:44:24.535910 16836 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0914 16:44:24.535938 16836 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19643-8643/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0914 16:44:24.535951 16836 cache.go:56] Caching tarball of preloaded images
I0914 16:44:24.536008 16836 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e in local docker daemon
I0914 16:44:24.536030 16836 preload.go:172] Found /home/jenkins/minikube-integration/19643-8643/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0914 16:44:24.536041 16836 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0914 16:44:24.536367 16836 profile.go:143] Saving config to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/config.json ...
I0914 16:44:24.536441 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/config.json: {Name:mk14eff27685a84851c56ec6a4987b83d546ad54 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:24.552055 16836 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e to local cache
I0914 16:44:24.552200 16836 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e in local cache directory
I0914 16:44:24.552218 16836 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e in local cache directory, skipping pull
I0914 16:44:24.552223 16836 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e exists in cache, skipping pull
I0914 16:44:24.552230 16836 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e as a tarball
I0914 16:44:24.552238 16836 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e from local cache
I0914 16:44:36.794508 16836 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e from cached tarball
I0914 16:44:36.794543 16836 cache.go:194] Successfully downloaded all kic artifacts
I0914 16:44:36.794579 16836 start.go:360] acquireMachinesLock for addons-657043: {Name:mkf6a9d28c3a58106829f66124bf22585ce1d6a2 Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0914 16:44:36.794679 16836 start.go:364] duration metric: took 81.974µs to acquireMachinesLock for "addons-657043"
I0914 16:44:36.794701 16836 start.go:93] Provisioning new machine with config: &{Name:addons-657043 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-657043 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0914 16:44:36.794774 16836 start.go:125] createHost starting for "" (driver="docker")
I0914 16:44:36.796561 16836 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0914 16:44:36.796793 16836 start.go:159] libmachine.API.Create for "addons-657043" (driver="docker")
I0914 16:44:36.796825 16836 client.go:168] LocalClient.Create starting
I0914 16:44:36.796925 16836 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca.pem
I0914 16:44:36.915725 16836 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/cert.pem
I0914 16:44:37.147747 16836 cli_runner.go:164] Run: docker network inspect addons-657043 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0914 16:44:37.163850 16836 cli_runner.go:211] docker network inspect addons-657043 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0914 16:44:37.163937 16836 network_create.go:284] running [docker network inspect addons-657043] to gather additional debugging logs...
I0914 16:44:37.163961 16836 cli_runner.go:164] Run: docker network inspect addons-657043
W0914 16:44:37.179467 16836 cli_runner.go:211] docker network inspect addons-657043 returned with exit code 1
I0914 16:44:37.179504 16836 network_create.go:287] error running [docker network inspect addons-657043]: docker network inspect addons-657043: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-657043 not found
I0914 16:44:37.179515 16836 network_create.go:289] output of [docker network inspect addons-657043]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-657043 not found
** /stderr **
I0914 16:44:37.179883 16836 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0914 16:44:37.196361 16836 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc0009fa790}
I0914 16:44:37.196408 16836 network_create.go:124] attempt to create docker network addons-657043 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0914 16:44:37.196451 16836 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-657043 addons-657043
I0914 16:44:37.258257 16836 network_create.go:108] docker network addons-657043 192.168.49.0/24 created
I0914 16:44:37.258288 16836 kic.go:121] calculated static IP "192.168.49.2" for the "addons-657043" container
I0914 16:44:37.258351 16836 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0914 16:44:37.273346 16836 cli_runner.go:164] Run: docker volume create addons-657043 --label name.minikube.sigs.k8s.io=addons-657043 --label created_by.minikube.sigs.k8s.io=true
I0914 16:44:37.291064 16836 oci.go:103] Successfully created a docker volume addons-657043
I0914 16:44:37.291152 16836 cli_runner.go:164] Run: docker run --rm --name addons-657043-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-657043 --entrypoint /usr/bin/test -v addons-657043:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e -d /var/lib
I0914 16:44:41.292562 16836 cli_runner.go:217] Completed: docker run --rm --name addons-657043-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-657043 --entrypoint /usr/bin/test -v addons-657043:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e -d /var/lib: (4.001368002s)
I0914 16:44:41.292591 16836 oci.go:107] Successfully prepared a docker volume addons-657043
I0914 16:44:41.292620 16836 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0914 16:44:41.292644 16836 kic.go:194] Starting extracting preloaded images to volume ...
I0914 16:44:41.292705 16836 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19643-8643/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-657043:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e -I lz4 -xf /preloaded.tar -C /extractDir
I0914 16:44:45.188390 16836 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19643-8643/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-657043:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e -I lz4 -xf /preloaded.tar -C /extractDir: (3.89563946s)
I0914 16:44:45.188418 16836 kic.go:203] duration metric: took 3.895772817s to extract preloaded images to volume ...
W0914 16:44:45.188533 16836 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0914 16:44:45.188619 16836 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0914 16:44:45.238900 16836 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-657043 --name addons-657043 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-657043 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-657043 --network addons-657043 --ip 192.168.49.2 --volume addons-657043:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e
I0914 16:44:45.539337 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Running}}
I0914 16:44:45.557465 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:44:45.575661 16836 cli_runner.go:164] Run: docker exec addons-657043 stat /var/lib/dpkg/alternatives/iptables
I0914 16:44:45.618889 16836 oci.go:144] the created container "addons-657043" has a running status.
I0914 16:44:45.618923 16836 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa...
I0914 16:44:45.733551 16836 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0914 16:44:45.754470 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:44:45.774385 16836 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0914 16:44:45.774410 16836 kic_runner.go:114] Args: [docker exec --privileged addons-657043 chown docker:docker /home/docker/.ssh/authorized_keys]
I0914 16:44:45.815156 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:44:45.837480 16836 machine.go:93] provisionDockerMachine start ...
I0914 16:44:45.837562 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:45.857104 16836 main.go:141] libmachine: Using SSH client type: native
I0914 16:44:45.857362 16836 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0914 16:44:45.857377 16836 main.go:141] libmachine: About to run SSH command:
hostname
I0914 16:44:45.858012 16836 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:49428->127.0.0.1:32768: read: connection reset by peer
I0914 16:44:48.979359 16836 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-657043
I0914 16:44:48.979389 16836 ubuntu.go:169] provisioning hostname "addons-657043"
I0914 16:44:48.979449 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:48.996504 16836 main.go:141] libmachine: Using SSH client type: native
I0914 16:44:48.996719 16836 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0914 16:44:48.996739 16836 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-657043 && echo "addons-657043" | sudo tee /etc/hostname
I0914 16:44:49.127047 16836 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-657043
I0914 16:44:49.127113 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:49.143967 16836 main.go:141] libmachine: Using SSH client type: native
I0914 16:44:49.144183 16836 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0914 16:44:49.144202 16836 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-657043' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-657043/g' /etc/hosts;
else
echo '127.0.1.1 addons-657043' | sudo tee -a /etc/hosts;
fi
fi
I0914 16:44:49.263920 16836 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0914 16:44:49.263945 16836 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19643-8643/.minikube CaCertPath:/home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19643-8643/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19643-8643/.minikube}
I0914 16:44:49.263983 16836 ubuntu.go:177] setting up certificates
I0914 16:44:49.263995 16836 provision.go:84] configureAuth start
I0914 16:44:49.264047 16836 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-657043
I0914 16:44:49.280208 16836 provision.go:143] copyHostCerts
I0914 16:44:49.280285 16836 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19643-8643/.minikube/cert.pem (1123 bytes)
I0914 16:44:49.280409 16836 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19643-8643/.minikube/key.pem (1679 bytes)
I0914 16:44:49.280467 16836 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19643-8643/.minikube/ca.pem (1078 bytes)
I0914 16:44:49.280514 16836 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19643-8643/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca-key.pem org=jenkins.addons-657043 san=[127.0.0.1 192.168.49.2 addons-657043 localhost minikube]
I0914 16:44:49.417607 16836 provision.go:177] copyRemoteCerts
I0914 16:44:49.417666 16836 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0914 16:44:49.417699 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:49.434620 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:44:49.528493 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0914 16:44:49.550098 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1078 bytes)
I0914 16:44:49.571627 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0914 16:44:49.593269 16836 provision.go:87] duration metric: took 329.257973ms to configureAuth
I0914 16:44:49.593291 16836 ubuntu.go:193] setting minikube options for container-runtime
I0914 16:44:49.593484 16836 config.go:182] Loaded profile config "addons-657043": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0914 16:44:49.593538 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:49.611364 16836 main.go:141] libmachine: Using SSH client type: native
I0914 16:44:49.611560 16836 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0914 16:44:49.611574 16836 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0914 16:44:49.732397 16836 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0914 16:44:49.732422 16836 ubuntu.go:71] root file system type: overlay
I0914 16:44:49.732544 16836 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0914 16:44:49.732607 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:49.749229 16836 main.go:141] libmachine: Using SSH client type: native
I0914 16:44:49.749459 16836 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0914 16:44:49.749528 16836 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0914 16:44:49.882426 16836 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0914 16:44:49.882500 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:49.899513 16836 main.go:141] libmachine: Using SSH client type: native
I0914 16:44:49.899711 16836 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0914 16:44:49.899735 16836 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0914 16:44:50.581259 16836 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-06 12:06:41.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-14 16:44:49.877627119 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0914 16:44:50.581296 16836 machine.go:96] duration metric: took 4.743788557s to provisionDockerMachine
I0914 16:44:50.581307 16836 client.go:171] duration metric: took 13.784475072s to LocalClient.Create
I0914 16:44:50.581324 16836 start.go:167] duration metric: took 13.784532522s to libmachine.API.Create "addons-657043"
I0914 16:44:50.581331 16836 start.go:293] postStartSetup for "addons-657043" (driver="docker")
I0914 16:44:50.581340 16836 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0914 16:44:50.581386 16836 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0914 16:44:50.581423 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:50.597647 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:44:50.684472 16836 ssh_runner.go:195] Run: cat /etc/os-release
I0914 16:44:50.687534 16836 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0914 16:44:50.687562 16836 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0914 16:44:50.687572 16836 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0914 16:44:50.687581 16836 info.go:137] Remote host: Ubuntu 22.04.4 LTS
I0914 16:44:50.687593 16836 filesync.go:126] Scanning /home/jenkins/minikube-integration/19643-8643/.minikube/addons for local assets ...
I0914 16:44:50.687647 16836 filesync.go:126] Scanning /home/jenkins/minikube-integration/19643-8643/.minikube/files for local assets ...
I0914 16:44:50.687668 16836 start.go:296] duration metric: took 106.33205ms for postStartSetup
I0914 16:44:50.687953 16836 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-657043
I0914 16:44:50.705060 16836 profile.go:143] Saving config to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/config.json ...
I0914 16:44:50.705333 16836 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0914 16:44:50.705379 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:50.721855 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:44:50.808716 16836 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0914 16:44:50.812989 16836 start.go:128] duration metric: took 14.018202994s to createHost
I0914 16:44:50.813016 16836 start.go:83] releasing machines lock for "addons-657043", held for 14.018325865s
I0914 16:44:50.813085 16836 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-657043
I0914 16:44:50.829578 16836 ssh_runner.go:195] Run: cat /version.json
I0914 16:44:50.829624 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:50.829755 16836 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0914 16:44:50.829827 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:44:50.846090 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:44:50.846610 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:44:51.004791 16836 ssh_runner.go:195] Run: systemctl --version
I0914 16:44:51.008974 16836 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0914 16:44:51.013011 16836 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0914 16:44:51.036571 16836 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0914 16:44:51.036653 16836 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0914 16:44:51.064252 16836 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0914 16:44:51.064281 16836 start.go:495] detecting cgroup driver to use...
I0914 16:44:51.064311 16836 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0914 16:44:51.064418 16836 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0914 16:44:51.079516 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0914 16:44:51.088730 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0914 16:44:51.098260 16836 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0914 16:44:51.098325 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0914 16:44:51.108173 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0914 16:44:51.117198 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0914 16:44:51.126470 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0914 16:44:51.135700 16836 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0914 16:44:51.144161 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0914 16:44:51.153094 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0914 16:44:51.161890 16836 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0914 16:44:51.170917 16836 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0914 16:44:51.178890 16836 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0914 16:44:51.186783 16836 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0914 16:44:51.259126 16836 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0914 16:44:51.343603 16836 start.go:495] detecting cgroup driver to use...
I0914 16:44:51.343653 16836 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0914 16:44:51.343709 16836 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0914 16:44:51.354722 16836 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0914 16:44:51.354782 16836 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0914 16:44:51.365601 16836 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0914 16:44:51.381109 16836 ssh_runner.go:195] Run: which cri-dockerd
I0914 16:44:51.384165 16836 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0914 16:44:51.392889 16836 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0914 16:44:51.419710 16836 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0914 16:44:51.513970 16836 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0914 16:44:51.592034 16836 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0914 16:44:51.592171 16836 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0914 16:44:51.616864 16836 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0914 16:44:51.697198 16836 ssh_runner.go:195] Run: sudo systemctl restart docker
I0914 16:44:51.940889 16836 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0914 16:44:51.951555 16836 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0914 16:44:51.961881 16836 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0914 16:44:52.042412 16836 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0914 16:44:52.121983 16836 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0914 16:44:52.193470 16836 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0914 16:44:52.205675 16836 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0914 16:44:52.215499 16836 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0914 16:44:52.286393 16836 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0914 16:44:52.344953 16836 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0914 16:44:52.345023 16836 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0914 16:44:52.348379 16836 start.go:563] Will wait 60s for crictl version
I0914 16:44:52.348440 16836 ssh_runner.go:195] Run: which crictl
I0914 16:44:52.351442 16836 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0914 16:44:52.384365 16836 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.2.1
RuntimeApiVersion: v1
I0914 16:44:52.384423 16836 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0914 16:44:52.407793 16836 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0914 16:44:52.432456 16836 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.2.1 ...
I0914 16:44:52.432531 16836 cli_runner.go:164] Run: docker network inspect addons-657043 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0914 16:44:52.447930 16836 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0914 16:44:52.451395 16836 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0914 16:44:52.461536 16836 kubeadm.go:883] updating cluster {Name:addons-657043 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-657043 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0914 16:44:52.461652 16836 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0914 16:44:52.461713 16836 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0914 16:44:52.479448 16836 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0914 16:44:52.479471 16836 docker.go:615] Images already preloaded, skipping extraction
I0914 16:44:52.479533 16836 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0914 16:44:52.497033 16836 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0914 16:44:52.497054 16836 cache_images.go:84] Images are preloaded, skipping loading
I0914 16:44:52.497064 16836 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0914 16:44:52.497147 16836 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-657043 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-657043 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
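This kubelet snippet uses the same ExecStart-clearing pattern as the docker drop-in above; judging by the 312-byte copy a few lines below, it is written out as /etc/systemd/system/kubelet.service.d/10-kubeadm.conf. A quick sanity check on the node would be:
sudo systemctl cat kubelet    # the last ExecStart printed (from the drop-in) is the one systemd actually runs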
I0914 16:44:52.497196 16836 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0914 16:44:52.540295 16836 cni.go:84] Creating CNI manager for ""
I0914 16:44:52.540319 16836 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0914 16:44:52.540329 16836 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0914 16:44:52.540345 16836 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-657043 NodeName:addons-657043 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0914 16:44:52.540476 16836 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.49.2
  bindPort: 8443
bootstrapTokens:
  - groups:
      - system:bootstrappers:kubeadm:default-node-token
    ttl: 24h0m0s
    usages:
      - signing
      - authentication
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  name: "addons-657043"
  kubeletExtraArgs:
    node-ip: 192.168.49.2
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
  extraArgs:
    enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    leader-elect: "false"
scheduler:
  extraArgs:
    leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
  local:
    dataDir: /var/lib/minikube/etcd
    extraArgs:
      proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  x509:
    clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 0
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
  tcpEstablishedTimeout: 0s
  # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
  tcpCloseWaitTimeout: 0s
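The rendered config above is staged as /var/tmp/minikube/kubeadm.yaml.new below and handed to kubeadm via --config (the full init invocation appears at 16:44:53). As a sketch, the same file could be exercised without changing node state by using kubeadm's dry-run mode with the binary path from this run:
sudo /var/lib/minikube/binaries/v1.31.1/kubeadm init --config /var/tmp/minikube/kubeadm.yaml --dry-run    # renders manifests to a temporary directory instead of /etc/kubernetes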
I0914 16:44:52.540530 16836 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0914 16:44:52.548568 16836 binaries.go:44] Found k8s binaries, skipping transfer
I0914 16:44:52.548628 16836 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0914 16:44:52.556337 16836 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0914 16:44:52.572027 16836 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0914 16:44:52.587972 16836 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0914 16:44:52.604032 16836 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0914 16:44:52.607157 16836 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0914 16:44:52.616802 16836 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0914 16:44:52.690775 16836 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0914 16:44:52.703126 16836 certs.go:68] Setting up /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043 for IP: 192.168.49.2
I0914 16:44:52.703148 16836 certs.go:194] generating shared ca certs ...
I0914 16:44:52.703167 16836 certs.go:226] acquiring lock for ca certs: {Name:mk914fe431cf2c03bbbc89acb1da4db675a44dca Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.703294 16836 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19643-8643/.minikube/ca.key
I0914 16:44:52.790692 16836 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19643-8643/.minikube/ca.crt ...
I0914 16:44:52.790720 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/ca.crt: {Name:mkea45d075bae139db25adb04ef999f8845a98e9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.790883 16836 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19643-8643/.minikube/ca.key ...
I0914 16:44:52.790893 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/ca.key: {Name:mkb282150a8850dbb9c83d0f6ea98cf74744905f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.790962 16836 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.key
I0914 16:44:52.862336 16836 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.crt ...
I0914 16:44:52.862362 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.crt: {Name:mk7a4c38d1b4da2a855fe6097daaa08ccf6e3fca Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.862508 16836 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.key ...
I0914 16:44:52.862519 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.key: {Name:mk226f25609d001858da0a66b9553ae0c985a704 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.862582 16836 certs.go:256] generating profile certs ...
I0914 16:44:52.862629 16836 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/client.key
I0914 16:44:52.862649 16836 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/client.crt with IP's: []
I0914 16:44:52.905614 16836 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/client.crt ...
I0914 16:44:52.905639 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/client.crt: {Name:mkcdd5c51eccb5a53a79efdb4225fb7e556208a3 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.905782 16836 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/client.key ...
I0914 16:44:52.905792 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/client.key: {Name:mk2676add01c978bceed033654b3a7e202d7a6c4 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:52.905855 16836 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.key.0639f098
I0914 16:44:52.905872 16836 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.crt.0639f098 with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0914 16:44:53.068192 16836 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.crt.0639f098 ...
I0914 16:44:53.068224 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.crt.0639f098: {Name:mk0b80cda560625d0b5bc7a66518976ac713d062 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:53.068380 16836 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.key.0639f098 ...
I0914 16:44:53.068393 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.key.0639f098: {Name:mk239856c7d1dbe60bc601592c03f08d5c48de4f Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:53.068467 16836 certs.go:381] copying /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.crt.0639f098 -> /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.crt
I0914 16:44:53.068538 16836 certs.go:385] copying /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.key.0639f098 -> /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.key
I0914 16:44:53.068583 16836 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.key
I0914 16:44:53.068606 16836 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.crt with IP's: []
I0914 16:44:53.308694 16836 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.crt ...
I0914 16:44:53.308729 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.crt: {Name:mkac81ad2ef2a6fa8315135b98f21a958286721a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:53.308922 16836 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.key ...
I0914 16:44:53.308938 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.key: {Name:mkb204c105fa1209dc1d0b4ac81d258a64b2a591 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:44:53.309131 16836 certs.go:484] found cert: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca-key.pem (1675 bytes)
I0914 16:44:53.309164 16836 certs.go:484] found cert: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/ca.pem (1078 bytes)
I0914 16:44:53.309188 16836 certs.go:484] found cert: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/cert.pem (1123 bytes)
I0914 16:44:53.309211 16836 certs.go:484] found cert: /home/jenkins/minikube-integration/19643-8643/.minikube/certs/key.pem (1679 bytes)
I0914 16:44:53.309732 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0914 16:44:53.331738 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0914 16:44:53.352820 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0914 16:44:53.374658 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1679 bytes)
I0914 16:44:53.397201 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0914 16:44:53.418915 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes)
I0914 16:44:53.439947 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0914 16:44:53.461224 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/profiles/addons-657043/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0914 16:44:53.481859 16836 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19643-8643/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0914 16:44:53.503168 16836 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0914 16:44:53.519053 16836 ssh_runner.go:195] Run: openssl version
I0914 16:44:53.524102 16836 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0914 16:44:53.532857 16836 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0914 16:44:53.535869 16836 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 14 16:44 /usr/share/ca-certificates/minikubeCA.pem
I0914 16:44:53.535909 16836 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0914 16:44:53.541878 16836 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0914 16:44:53.550069 16836 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0914 16:44:53.553023 16836 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0914 16:44:53.553067 16836 kubeadm.go:392] StartCluster: {Name:addons-657043 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726281268-19643@sha256:cce8be0c1ac4e3d852132008ef1cc1dcf5b79f708d025db83f146ae65db32e8e Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-657043 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0914 16:44:53.553158 16836 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0914 16:44:53.569790 16836 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0914 16:44:53.577824 16836 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0914 16:44:53.585874 16836 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0914 16:44:53.585917 16836 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0914 16:44:53.593309 16836 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0914 16:44:53.593328 16836 kubeadm.go:157] found existing configuration files:
I0914 16:44:53.593364 16836 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0914 16:44:53.600780 16836 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0914 16:44:53.600828 16836 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0914 16:44:53.608196 16836 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0914 16:44:53.615719 16836 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0914 16:44:53.615775 16836 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0914 16:44:53.623106 16836 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0914 16:44:53.630567 16836 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0914 16:44:53.630613 16836 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0914 16:44:53.638277 16836 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0914 16:44:53.646059 16836 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0914 16:44:53.646107 16836 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0914 16:44:53.654324 16836 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0914 16:44:53.689016 16836 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0914 16:44:53.689066 16836 kubeadm.go:310] [preflight] Running pre-flight checks
I0914 16:44:53.708711 16836 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0914 16:44:53.708804 16836 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1068-gcp
I0914 16:44:53.708845 16836 kubeadm.go:310] OS: Linux
I0914 16:44:53.708900 16836 kubeadm.go:310] CGROUPS_CPU: enabled
I0914 16:44:53.708961 16836 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0914 16:44:53.709009 16836 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0914 16:44:53.709076 16836 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0914 16:44:53.709119 16836 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0914 16:44:53.709162 16836 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0914 16:44:53.709236 16836 kubeadm.go:310] CGROUPS_PIDS: enabled
I0914 16:44:53.709310 16836 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0914 16:44:53.709376 16836 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0914 16:44:53.756146 16836 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0914 16:44:53.756272 16836 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0914 16:44:53.756402 16836 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0914 16:44:53.766770 16836 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0914 16:44:53.770236 16836 out.go:235] - Generating certificates and keys ...
I0914 16:44:53.770347 16836 kubeadm.go:310] [certs] Using existing ca certificate authority
I0914 16:44:53.770461 16836 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0914 16:44:54.001614 16836 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0914 16:44:54.145881 16836 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0914 16:44:54.491783 16836 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0914 16:44:54.600526 16836 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0914 16:44:54.736713 16836 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0914 16:44:54.736904 16836 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-657043 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0914 16:44:55.150486 16836 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0914 16:44:55.150682 16836 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-657043 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0914 16:44:55.344924 16836 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0914 16:44:55.617219 16836 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0914 16:44:55.914098 16836 kubeadm.go:310] [certs] Generating "sa" key and public key
I0914 16:44:55.914189 16836 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0914 16:44:56.089150 16836 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0914 16:44:56.158141 16836 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0914 16:44:56.403720 16836 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0914 16:44:56.755465 16836 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0914 16:44:56.828499 16836 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0914 16:44:56.828948 16836 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0914 16:44:56.831313 16836 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0914 16:44:56.833348 16836 out.go:235] - Booting up control plane ...
I0914 16:44:56.833471 16836 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0914 16:44:56.833577 16836 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0914 16:44:56.833853 16836 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0914 16:44:56.843528 16836 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0914 16:44:56.848797 16836 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0914 16:44:56.848849 16836 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0914 16:44:56.919866 16836 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0914 16:44:56.919990 16836 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0914 16:44:57.421242 16836 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.437991ms
I0914 16:44:57.421352 16836 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0914 16:45:01.923325 16836 kubeadm.go:310] [api-check] The API server is healthy after 4.502086308s
I0914 16:45:01.934837 16836 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0914 16:45:01.945298 16836 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0914 16:45:01.964125 16836 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0914 16:45:01.964409 16836 kubeadm.go:310] [mark-control-plane] Marking the node addons-657043 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0914 16:45:01.971234 16836 kubeadm.go:310] [bootstrap-token] Using token: efg96o.90x1yxid34ht3wph
I0914 16:45:01.972715 16836 out.go:235] - Configuring RBAC rules ...
I0914 16:45:01.972862 16836 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0914 16:45:01.975673 16836 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0914 16:45:01.981878 16836 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0914 16:45:01.984053 16836 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0914 16:45:01.986299 16836 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0914 16:45:01.988725 16836 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0914 16:45:02.331158 16836 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0914 16:45:02.751712 16836 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0914 16:45:03.330714 16836 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0914 16:45:03.331589 16836 kubeadm.go:310]
I0914 16:45:03.331681 16836 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0914 16:45:03.331690 16836 kubeadm.go:310]
I0914 16:45:03.331772 16836 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0914 16:45:03.331779 16836 kubeadm.go:310]
I0914 16:45:03.331842 16836 kubeadm.go:310] mkdir -p $HOME/.kube
I0914 16:45:03.331920 16836 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0914 16:45:03.331993 16836 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0914 16:45:03.332001 16836 kubeadm.go:310]
I0914 16:45:03.332090 16836 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0914 16:45:03.332100 16836 kubeadm.go:310]
I0914 16:45:03.332157 16836 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0914 16:45:03.332170 16836 kubeadm.go:310]
I0914 16:45:03.332260 16836 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0914 16:45:03.332342 16836 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0914 16:45:03.332471 16836 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0914 16:45:03.332490 16836 kubeadm.go:310]
I0914 16:45:03.332618 16836 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0914 16:45:03.332736 16836 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0914 16:45:03.332748 16836 kubeadm.go:310]
I0914 16:45:03.332833 16836 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token efg96o.90x1yxid34ht3wph \
I0914 16:45:03.332923 16836 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:5ce38e0425aebe896fba692a2e827a00ea28fca3ff5501d64ed999ae002606e0 \
I0914 16:45:03.332943 16836 kubeadm.go:310] --control-plane
I0914 16:45:03.332948 16836 kubeadm.go:310]
I0914 16:45:03.333020 16836 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0914 16:45:03.333027 16836 kubeadm.go:310]
I0914 16:45:03.333095 16836 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token efg96o.90x1yxid34ht3wph \
I0914 16:45:03.333190 16836 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:5ce38e0425aebe896fba692a2e827a00ea28fca3ff5501d64ed999ae002606e0
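The join commands above embed the bootstrap token minted for this run; per the InitConfiguration earlier it carries a ttl of 24h0m0s. If it has expired, a replacement join line can be generated on the control-plane node with, for example:
sudo kubeadm token create --print-join-command    # issues a fresh token and prints the matching worker join command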
I0914 16:45:03.335218 16836 kubeadm.go:310] W0914 16:44:53.686400 1920 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0914 16:45:03.335502 16836 kubeadm.go:310] W0914 16:44:53.687013 1920 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0914 16:45:03.335703 16836 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1068-gcp\n", err: exit status 1
I0914 16:45:03.335924 16836 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0914 16:45:03.335946 16836 cni.go:84] Creating CNI manager for ""
I0914 16:45:03.335962 16836 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0914 16:45:03.337738 16836 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0914 16:45:03.339029 16836 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0914 16:45:03.347138 16836 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0914 16:45:03.363216 16836 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0914 16:45:03.363338 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:03.363366 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-657043 minikube.k8s.io/updated_at=2024_09_14T16_45_03_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=fbeeb744274463b05401c917e5ab21bbaf5ef95a minikube.k8s.io/name=addons-657043 minikube.k8s.io/primary=true
I0914 16:45:03.432152 16836 ops.go:34] apiserver oom_adj: -16
I0914 16:45:03.432171 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:03.932826 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:04.432909 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:04.932224 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:05.432605 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:05.932708 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:06.432258 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:06.933222 16836 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0914 16:45:06.997649 16836 kubeadm.go:1113] duration metric: took 3.634373442s to wait for elevateKubeSystemPrivileges
I0914 16:45:06.997686 16836 kubeadm.go:394] duration metric: took 13.444622467s to StartCluster
I0914 16:45:06.997711 16836 settings.go:142] acquiring lock: {Name:mkb942172743e015fe2756d8db792b3e9690a0d9 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:45:06.997824 16836 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19643-8643/kubeconfig
I0914 16:45:06.998200 16836 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19643-8643/kubeconfig: {Name:mkcb90a834c978388d45bee86e05e3e31981d787 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0914 16:45:06.998378 16836 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0914 16:45:06.998393 16836 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0914 16:45:06.998472 16836 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0914 16:45:06.998576 16836 addons.go:69] Setting yakd=true in profile "addons-657043"
I0914 16:45:06.998581 16836 config.go:182] Loaded profile config "addons-657043": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0914 16:45:06.998587 16836 addons.go:69] Setting cloud-spanner=true in profile "addons-657043"
I0914 16:45:06.998595 16836 addons.go:234] Setting addon yakd=true in "addons-657043"
I0914 16:45:06.998600 16836 addons.go:234] Setting addon cloud-spanner=true in "addons-657043"
I0914 16:45:06.998626 16836 addons.go:69] Setting storage-provisioner=true in profile "addons-657043"
I0914 16:45:06.998639 16836 addons.go:69] Setting volumesnapshots=true in profile "addons-657043"
I0914 16:45:06.998637 16836 addons.go:69] Setting metrics-server=true in profile "addons-657043"
I0914 16:45:06.998638 16836 addons.go:69] Setting volcano=true in profile "addons-657043"
I0914 16:45:06.998649 16836 addons.go:234] Setting addon volumesnapshots=true in "addons-657043"
I0914 16:45:06.998653 16836 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-657043"
I0914 16:45:06.998664 16836 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-657043"
I0914 16:45:06.998678 16836 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-657043"
I0914 16:45:06.998685 16836 addons.go:69] Setting ingress-dns=true in profile "addons-657043"
I0914 16:45:06.998693 16836 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-657043"
I0914 16:45:06.998697 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998698 16836 addons.go:69] Setting registry=true in profile "addons-657043"
I0914 16:45:06.998707 16836 addons.go:69] Setting ingress=true in profile "addons-657043"
I0914 16:45:06.998720 16836 addons.go:234] Setting addon ingress=true in "addons-657043"
I0914 16:45:06.998628 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998752 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998644 16836 addons.go:234] Setting addon storage-provisioner=true in "addons-657043"
I0914 16:45:06.998821 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998709 16836 addons.go:234] Setting addon registry=true in "addons-657043"
I0914 16:45:06.998901 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998673 16836 addons.go:69] Setting helm-tiller=true in profile "addons-657043"
I0914 16:45:06.998975 16836 addons.go:234] Setting addon helm-tiller=true in "addons-657043"
I0914 16:45:06.999002 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.999034 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.998697 16836 addons.go:69] Setting default-storageclass=true in profile "addons-657043"
I0914 16:45:06.999055 16836 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-657043"
I0914 16:45:06.999199 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.999202 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.999284 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.999295 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.999331 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.999399 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.999431 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.998679 16836 addons.go:234] Setting addon metrics-server=true in "addons-657043"
I0914 16:45:06.999954 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998669 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:07.000515 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.000679 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.998574 16836 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-657043"
I0914 16:45:07.000918 16836 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-657043"
I0914 16:45:07.000988 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998671 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:07.001356 16836 out.go:177] * Verifying Kubernetes components...
I0914 16:45:07.001513 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:06.998688 16836 addons.go:69] Setting inspektor-gadget=true in profile "addons-657043"
I0914 16:45:07.001874 16836 addons.go:234] Setting addon inspektor-gadget=true in "addons-657043"
I0914 16:45:07.001906 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998700 16836 addons.go:234] Setting addon ingress-dns=true in "addons-657043"
I0914 16:45:07.002248 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998696 16836 addons.go:234] Setting addon volcano=true in "addons-657043"
I0914 16:45:07.002368 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:06.998707 16836 addons.go:69] Setting gcp-auth=true in profile "addons-657043"
I0914 16:45:07.002509 16836 mustload.go:65] Loading cluster: addons-657043
I0914 16:45:07.003432 16836 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0914 16:45:07.028886 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.029206 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.029365 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.029768 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.030261 16836 config.go:182] Loaded profile config "addons-657043": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0914 16:45:07.030528 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.033391 16836 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0914 16:45:07.035251 16836 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0914 16:45:07.035276 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0914 16:45:07.035339 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.035661 16836 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0914 16:45:07.037136 16836 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0914 16:45:07.043362 16836 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0914 16:45:07.049501 16836 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-657043"
I0914 16:45:07.049566 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:07.050048 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.053119 16836 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0914 16:45:07.053418 16836 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0914 16:45:07.053451 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0914 16:45:07.053511 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.054416 16836 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0914 16:45:07.054440 16836 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0914 16:45:07.054502 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.057640 16836 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0914 16:45:07.059603 16836 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0914 16:45:07.059629 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0914 16:45:07.059687 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.064774 16836 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.23
I0914 16:45:07.066607 16836 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0914 16:45:07.066629 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0914 16:45:07.066692 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.098674 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.099521 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:07.100482 16836 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0914 16:45:07.101623 16836 addons.go:234] Setting addon default-storageclass=true in "addons-657043"
I0914 16:45:07.101667 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:07.102290 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:07.103311 16836 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0914 16:45:07.107848 16836 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0914 16:45:07.110660 16836 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0914 16:45:07.110684 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0914 16:45:07.110744 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.118962 16836 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0914 16:45:07.120404 16836 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0914 16:45:07.120426 16836 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0914 16:45:07.120584 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.123807 16836 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
I0914 16:45:07.125190 16836 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0914 16:45:07.125210 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0914 16:45:07.125269 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.127097 16836 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0914 16:45:07.127091 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.128861 16836 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0914 16:45:07.128883 16836 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0914 16:45:07.129025 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.130957 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.134691 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.137214 16836 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0914 16:45:07.137280 16836 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0914 16:45:07.137342 16836 out.go:177] - Using image docker.io/busybox:stable
I0914 16:45:07.138467 16836 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0914 16:45:07.138488 16836 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0914 16:45:07.138547 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.140033 16836 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0914 16:45:07.140139 16836 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0914 16:45:07.141564 16836 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0914 16:45:07.141584 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0914 16:45:07.141641 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.143365 16836 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0914 16:45:07.143532 16836 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0914 16:45:07.143552 16836 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0914 16:45:07.143602 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.148030 16836 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0914 16:45:07.149156 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.151183 16836 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0914 16:45:07.151399 16836 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0914 16:45:07.152491 16836 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0914 16:45:07.153835 16836 out.go:177] - Using image docker.io/registry:2.8.3
I0914 16:45:07.153923 16836 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0914 16:45:07.155170 16836 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0914 16:45:07.155188 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0914 16:45:07.155244 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.156869 16836 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0914 16:45:07.157911 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0914 16:45:07.157929 16836 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0914 16:45:07.157992 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.160426 16836 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
I0914 16:45:07.164024 16836 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0914 16:45:07.166302 16836 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0914 16:45:07.166325 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0914 16:45:07.166386 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:07.177260 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.182345 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.184808 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.186980 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.188287 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.193605 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.194316 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.195436 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.196454 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.201827 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:07.226611 16836 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0914 16:45:07.528530 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0914 16:45:07.534493 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0914 16:45:07.535072 16836 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0914 16:45:07.535091 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0914 16:45:07.618545 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0914 16:45:07.618642 16836 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0914 16:45:07.630270 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0914 16:45:07.635563 16836 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0914 16:45:07.635652 16836 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0914 16:45:07.713704 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0914 16:45:07.720913 16836 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0914 16:45:07.721009 16836 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0914 16:45:07.830449 16836 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0914 16:45:07.830546 16836 ssh_runner.go:362] scp helm-tiller/helm-tiller-rbac.yaml --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0914 16:45:07.833243 16836 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0914 16:45:07.833326 16836 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0914 16:45:07.835586 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0914 16:45:07.913345 16836 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0914 16:45:07.913425 16836 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0914 16:45:07.920419 16836 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0914 16:45:07.920499 16836 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0914 16:45:07.927789 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0914 16:45:07.929165 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0914 16:45:07.929223 16836 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0914 16:45:07.933451 16836 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0914 16:45:07.933528 16836 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0914 16:45:07.936510 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0914 16:45:08.013758 16836 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0914 16:45:08.013875 16836 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0914 16:45:08.019823 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0914 16:45:08.127012 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0914 16:45:08.127100 16836 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0914 16:45:08.215896 16836 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0914 16:45:08.215923 16836 ssh_runner.go:362] scp helm-tiller/helm-tiller-svc.yaml --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0914 16:45:08.317384 16836 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0914 16:45:08.317414 16836 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0914 16:45:08.329113 16836 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0914 16:45:08.329140 16836 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0914 16:45:08.332345 16836 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0914 16:45:08.332420 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0914 16:45:08.416679 16836 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0914 16:45:08.416768 16836 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0914 16:45:08.613676 16836 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0914 16:45:08.613763 16836 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0914 16:45:08.620152 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0914 16:45:08.620225 16836 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0914 16:45:08.715232 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0914 16:45:08.816319 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0914 16:45:08.925192 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0914 16:45:08.933010 16836 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0914 16:45:08.933094 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0914 16:45:09.031488 16836 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.804819059s)
I0914 16:45:09.032162 16836 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.871707362s)
I0914 16:45:09.032331 16836 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
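The pipeline that completed at 16:45:09 rewrote the CoreDNS Corefile so that cluster pods can resolve host.minikube.internal to the Docker bridge gateway 192.168.49.1 (the hosts/fallthrough stanza inserted by the sed command in the Run line above). A minimal way to confirm the injected record, assuming the addons-657043 kubeconfig context is available on the host, would be:

  kubectl --context addons-657043 -n kube-system get configmap coredns -o yaml | grep -A3 'hosts {'

This is only an illustrative check; the test itself edits the ConfigMap through the in-node kubectl binary shown above.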
I0914 16:45:09.033611 16836 node_ready.go:35] waiting up to 6m0s for node "addons-657043" to be "Ready" ...
I0914 16:45:09.118030 16836 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0914 16:45:09.118117 16836 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0914 16:45:09.121115 16836 node_ready.go:49] node "addons-657043" has status "Ready":"True"
I0914 16:45:09.121201 16836 node_ready.go:38] duration metric: took 87.441514ms for node "addons-657043" to be "Ready" ...
I0914 16:45:09.121228 16836 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0914 16:45:09.134066 16836 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace to be "Ready" ...
I0914 16:45:09.316489 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0914 16:45:09.316569 16836 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0914 16:45:09.435344 16836 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0914 16:45:09.435375 16836 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0914 16:45:09.513675 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0914 16:45:09.615107 16836 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-657043" context rescaled to 1 replicas
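The rescale above is minikube keeping a single CoreDNS replica on this one-node cluster. An equivalent manual step, assuming the same context name, would be:

  kubectl --context addons-657043 -n kube-system scale deployment coredns --replicas=1

(The test binary performs this through its kapi helper rather than by shelling out to kubectl.)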
I0914 16:45:09.634904 16836 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0914 16:45:09.634932 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0914 16:45:10.237393 16836 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0914 16:45:10.237422 16836 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0914 16:45:10.427098 16836 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0914 16:45:10.427175 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0914 16:45:10.516230 16836 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0914 16:45:10.516316 16836 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0914 16:45:10.815431 16836 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0914 16:45:10.815533 16836 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0914 16:45:11.017906 16836 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0914 16:45:11.017938 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0914 16:45:11.025653 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0914 16:45:11.133227 16836 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0914 16:45:11.133318 16836 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0914 16:45:11.229106 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:11.416670 16836 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0914 16:45:11.416703 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0914 16:45:11.715766 16836 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0914 16:45:11.715796 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0914 16:45:12.316570 16836 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0914 16:45:12.316657 16836 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0914 16:45:12.321191 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0914 16:45:13.023332 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0914 16:45:13.721417 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:14.123363 16836 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0914 16:45:14.123537 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:14.147804 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:14.923184 16836 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0914 16:45:15.335885 16836 addons.go:234] Setting addon gcp-auth=true in "addons-657043"
I0914 16:45:15.335998 16836 host.go:66] Checking if "addons-657043" exists ...
I0914 16:45:15.336544 16836 cli_runner.go:164] Run: docker container inspect addons-657043 --format={{.State.Status}}
I0914 16:45:15.354086 16836 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0914 16:45:15.354127 16836 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-657043
I0914 16:45:15.370067 16836 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19643-8643/.minikube/machines/addons-657043/id_rsa Username:docker}
I0914 16:45:16.218931 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:17.129244 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (9.60067472s)
I0914 16:45:17.129299 16836 addons.go:475] Verifying addon ingress=true in "addons-657043"
I0914 16:45:17.129362 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (9.59483405s)
I0914 16:45:17.129431 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (9.499131924s)
I0914 16:45:17.132714 16836 out.go:177] * Verifying ingress addon...
I0914 16:45:17.135560 16836 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0914 16:45:17.220721 16836 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0914 16:45:17.220753 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:17.716198 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:18.220903 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:18.222373 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:18.641196 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:19.324509 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:19.721235 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:20.220761 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:20.223930 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:20.425368 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (12.711566436s)
I0914 16:45:20.425585 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (12.589973672s)
I0914 16:45:20.425676 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (12.497808428s)
I0914 16:45:20.425796 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (12.489261723s)
I0914 16:45:20.426092 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (12.406176721s)
I0914 16:45:20.426320 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (11.71099994s)
I0914 16:45:20.426404 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (11.609989174s)
I0914 16:45:20.426470 16836 addons.go:475] Verifying addon registry=true in "addons-657043"
I0914 16:45:20.426913 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (11.501622217s)
I0914 16:45:20.427287 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (8.106002316s)
I0914 16:45:20.427170 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.401474818s)
W0914 16:45:20.427411 16836 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0914 16:45:20.427438 16836 retry.go:31] will retry after 292.031745ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
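The failure above is a CRD-establishment race rather than a broken manifest: the VolumeSnapshotClass in csi-hostpath-snapshotclass.yaml is submitted in the same kubectl invocation that creates the snapshot.storage.k8s.io CRDs, and the API server has not finished establishing those CRDs when the class arrives, hence "no matches for kind VolumeSnapshotClass". minikube simply retries; the re-apply at 16:45:20.719 below runs with --force and completes about two seconds later. A hedged manual workaround with the same effect would be to wait for the CRD before applying the class:

  kubectl --context addons-657043 wait --for=condition=established --timeout=60s crd/volumesnapshotclasses.snapshot.storage.k8s.io
  kubectl --context addons-657043 apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml

The second command assumes the manifest path is reachable from wherever kubectl runs; in the test it lives on the minikube node.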
I0914 16:45:20.427211 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (10.91331095s)
I0914 16:45:20.427686 16836 addons.go:475] Verifying addon metrics-server=true in "addons-657043"
I0914 16:45:20.428411 16836 out.go:177] * Verifying registry addon...
I0914 16:45:20.429293 16836 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-657043 service yakd-dashboard -n yakd-dashboard
I0914 16:45:20.431141 16836 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0914 16:45:20.435790 16836 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0914 16:45:20.435867 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
W0914 16:45:20.513390 16836 out.go:270] ! Enabling 'storage-provisioner-rancher' returned an error: running callbacks: [Error making local-path the default storage class: Error while marking storage class local-path as default: Operation cannot be fulfilled on storageclasses.storage.k8s.io "local-path": the object has been modified; please apply your changes to the latest version and try again]
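The storage-provisioner-rancher warning above is an optimistic-concurrency conflict: the addon callback tries to annotate the local-path StorageClass as the cluster default while another writer is updating the same object, so the API server rejects the stale update. It is normally transient; a hedged manual retry, using the standard default-class annotation, would be:

  kubectl --context addons-657043 patch storageclass local-path -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

This is only a sketch of the equivalent operation, not the code path the addon callback itself takes.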
I0914 16:45:20.719762 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0914 16:45:20.722418 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:20.937691 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:21.142284 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:21.434140 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (8.410746993s)
I0914 16:45:21.434191 16836 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-657043"
I0914 16:45:21.434216 16836 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (6.080102908s)
I0914 16:45:21.436552 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:21.436819 16836 out.go:177] * Verifying csi-hostpath-driver addon...
I0914 16:45:21.436820 16836 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0914 16:45:21.438322 16836 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0914 16:45:21.439203 16836 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0914 16:45:21.440790 16836 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0914 16:45:21.440851 16836 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0914 16:45:21.446227 16836 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0914 16:45:21.446258 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:21.627995 16836 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0914 16:45:21.628022 16836 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0914 16:45:21.639751 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:21.716993 16836 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0914 16:45:21.717018 16836 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0914 16:45:21.741979 16836 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0914 16:45:21.935501 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:21.944896 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:22.140468 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:22.435762 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:22.444987 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:22.640093 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:22.640140 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:22.935475 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:22.943795 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:23.012929 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.293112029s)
I0914 16:45:23.143646 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:23.254206 16836 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.512187735s)
I0914 16:45:23.256397 16836 addons.go:475] Verifying addon gcp-auth=true in "addons-657043"
I0914 16:45:23.258224 16836 out.go:177] * Verifying gcp-auth addon...
I0914 16:45:23.262639 16836 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0914 16:45:23.314981 16836 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0914 16:45:23.435773 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:23.443533 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:23.640529 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:23.934945 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:23.943606 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:24.140264 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:24.435270 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:24.443326 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:24.639140 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:24.935906 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:24.943394 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:25.139327 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:25.140504 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:25.435338 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:25.443968 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:25.639164 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:25.934715 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:25.944176 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:26.139910 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:26.435539 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:26.444466 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:26.694172 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:27.083012 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:27.083558 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:27.139313 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:27.139713 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:27.434729 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:27.442681 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:27.639145 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:27.934802 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:27.942605 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:28.139421 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:28.435224 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:28.443148 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:28.639906 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:28.934386 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:28.943258 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:29.139539 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:29.139810 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:29.434724 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:29.442740 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:29.638833 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:29.935016 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:29.943286 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:30.139047 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:30.434985 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:30.442702 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:30.639342 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:30.934399 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:30.943093 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:31.139869 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:31.434993 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:31.442883 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:31.639062 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:31.639481 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:31.934338 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:31.943245 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:32.139071 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:32.435322 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:32.443841 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:32.640022 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:32.935650 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:32.944191 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:33.140241 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:33.434646 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:33.443582 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:33.639733 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:33.640283 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:33.934982 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:33.943020 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:34.140509 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:34.434937 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:34.442965 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:34.639277 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:34.934822 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:34.944290 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:35.140764 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:35.434784 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:35.442471 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:35.639955 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:35.641065 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:35.934452 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:35.943382 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:36.139202 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:36.434519 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:36.443551 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:36.639023 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:36.935090 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:36.943221 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:37.139090 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:37.434547 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:37.443547 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:37.639457 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:37.641722 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:37.934542 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:37.944179 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:38.139625 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:38.435113 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:38.443041 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:38.639604 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:38.934406 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:38.943534 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:39.139253 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:39.435491 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:39.443740 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:39.639571 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:39.935497 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:39.943769 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:40.139383 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:40.140527 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:40.434418 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:40.443801 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:40.639432 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:40.935131 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:40.943800 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:41.139796 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:41.435077 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:41.443460 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:41.639266 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:41.935055 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:41.943584 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:42.140101 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:42.141084 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:42.435118 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:42.443513 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:42.639098 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:42.935094 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:42.943138 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:43.139957 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:43.435075 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:43.443088 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:43.640255 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:43.935030 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:43.943066 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:44.139725 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:44.434137 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:44.443645 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:44.638864 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:44.639522 16836 pod_ready.go:103] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"False"
I0914 16:45:44.935084 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:44.943053 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:45.140123 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:45.434925 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:45.443043 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:45.639866 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:45.934775 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:45.942538 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:46.138940 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:46.435283 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:46.443431 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:46.639924 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:46.935539 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:46.943842 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:47.139976 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:47.140813 16836 pod_ready.go:93] pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:47.140833 16836 pod_ready.go:82] duration metric: took 38.006726126s for pod "coredns-7c65d6cfc9-fhkpr" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.140847 16836 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-q72fv" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.142374 16836 pod_ready.go:98] error getting pod "coredns-7c65d6cfc9-q72fv" in "kube-system" namespace (skipping!): pods "coredns-7c65d6cfc9-q72fv" not found
I0914 16:45:47.142396 16836 pod_ready.go:82] duration metric: took 1.539019ms for pod "coredns-7c65d6cfc9-q72fv" in "kube-system" namespace to be "Ready" ...
E0914 16:45:47.142433 16836 pod_ready.go:67] WaitExtra: waitPodCondition: error getting pod "coredns-7c65d6cfc9-q72fv" in "kube-system" namespace (skipping!): pods "coredns-7c65d6cfc9-q72fv" not found
I0914 16:45:47.142445 16836 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.146513 16836 pod_ready.go:93] pod "etcd-addons-657043" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:47.146530 16836 pod_ready.go:82] duration metric: took 4.076525ms for pod "etcd-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.146538 16836 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.150672 16836 pod_ready.go:93] pod "kube-apiserver-addons-657043" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:47.150690 16836 pod_ready.go:82] duration metric: took 4.14605ms for pod "kube-apiserver-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.150699 16836 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.154464 16836 pod_ready.go:93] pod "kube-controller-manager-addons-657043" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:47.154480 16836 pod_ready.go:82] duration metric: took 3.774751ms for pod "kube-controller-manager-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.154489 16836 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-k2qjf" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.337895 16836 pod_ready.go:93] pod "kube-proxy-k2qjf" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:47.337917 16836 pod_ready.go:82] duration metric: took 183.421984ms for pod "kube-proxy-k2qjf" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.337927 16836 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.435550 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:47.443959 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:47.639711 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:47.737799 16836 pod_ready.go:93] pod "kube-scheduler-addons-657043" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:47.737824 16836 pod_ready.go:82] duration metric: took 399.889725ms for pod "kube-scheduler-addons-657043" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.737837 16836 pod_ready.go:79] waiting up to 6m0s for pod "nvidia-device-plugin-daemonset-8wvj8" in "kube-system" namespace to be "Ready" ...
I0914 16:45:47.934385 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:47.945256 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:48.137860 16836 pod_ready.go:93] pod "nvidia-device-plugin-daemonset-8wvj8" in "kube-system" namespace has status "Ready":"True"
I0914 16:45:48.137885 16836 pod_ready.go:82] duration metric: took 400.040437ms for pod "nvidia-device-plugin-daemonset-8wvj8" in "kube-system" namespace to be "Ready" ...
I0914 16:45:48.137895 16836 pod_ready.go:39] duration metric: took 39.016624703s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0914 16:45:48.137921 16836 api_server.go:52] waiting for apiserver process to appear ...
I0914 16:45:48.137979 16836 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0914 16:45:48.140089 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:48.152405 16836 api_server.go:72] duration metric: took 41.153981727s to wait for apiserver process to appear ...
I0914 16:45:48.152430 16836 api_server.go:88] waiting for apiserver healthz status ...
I0914 16:45:48.152458 16836 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0914 16:45:48.156299 16836 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0914 16:45:48.157159 16836 api_server.go:141] control plane version: v1.31.1
I0914 16:45:48.157180 16836 api_server.go:131] duration metric: took 4.743592ms to wait for apiserver health ...
I0914 16:45:48.157188 16836 system_pods.go:43] waiting for kube-system pods to appear ...
I0914 16:45:48.342902 16836 system_pods.go:59] 18 kube-system pods found
I0914 16:45:48.342939 16836 system_pods.go:61] "coredns-7c65d6cfc9-fhkpr" [ed430295-b8bb-4b63-879b-41ac0ffc425f] Running
I0914 16:45:48.342948 16836 system_pods.go:61] "csi-hostpath-attacher-0" [05fd59ce-bf15-4ac8-a8ba-ba33283ce18e] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0914 16:45:48.342955 16836 system_pods.go:61] "csi-hostpath-resizer-0" [88020b83-2dc2-4f3b-849c-313753ff9e30] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0914 16:45:48.342964 16836 system_pods.go:61] "csi-hostpathplugin-8rbhp" [68be09a0-981d-4c2a-a863-b68f786ef120] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0914 16:45:48.342971 16836 system_pods.go:61] "etcd-addons-657043" [79535ace-a736-4d42-a433-2c62172065fd] Running
I0914 16:45:48.342977 16836 system_pods.go:61] "kube-apiserver-addons-657043" [87a2c6e8-023b-4f28-9658-8f695938ef56] Running
I0914 16:45:48.342984 16836 system_pods.go:61] "kube-controller-manager-addons-657043" [e706191e-83a3-41b8-97d4-e7023ace20b7] Running
I0914 16:45:48.342995 16836 system_pods.go:61] "kube-ingress-dns-minikube" [5368b1a1-3e3e-4604-9d87-1f7cb6b7deb2] Running
I0914 16:45:48.343000 16836 system_pods.go:61] "kube-proxy-k2qjf" [0bee5060-fe14-4a2b-82ed-f1010c5ec25a] Running
I0914 16:45:48.343006 16836 system_pods.go:61] "kube-scheduler-addons-657043" [8b0a549e-3439-46c4-bdeb-f5cbb5c57bf4] Running
I0914 16:45:48.343015 16836 system_pods.go:61] "metrics-server-84c5f94fbc-brs9v" [58c5eb21-47b4-4c60-8417-5d5a5e6a66dd] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0914 16:45:48.343021 16836 system_pods.go:61] "nvidia-device-plugin-daemonset-8wvj8" [2f25db8f-f3dd-43be-ae80-0ec1d0689dc4] Running
I0914 16:45:48.343026 16836 system_pods.go:61] "registry-66c9cd494c-9jnkd" [fe7b5898-ffa3-4b87-b941-4220b03798f0] Running
I0914 16:45:48.343034 16836 system_pods.go:61] "registry-proxy-d2r6l" [5b832366-d825-43bd-8e89-8b85bdb2fda4] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0914 16:45:48.343042 16836 system_pods.go:61] "snapshot-controller-56fcc65765-5b7lc" [605775e7-c176-4086-9177-20d29e1d3408] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0914 16:45:48.343054 16836 system_pods.go:61] "snapshot-controller-56fcc65765-8rgp8" [538a76dd-782a-4277-ba29-311568404eee] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0914 16:45:48.343062 16836 system_pods.go:61] "storage-provisioner" [7e97288c-a120-43a5-a546-435fb0269a16] Running
I0914 16:45:48.343068 16836 system_pods.go:61] "tiller-deploy-b48cc5f79-rlg46" [93dc59e3-8af8-4f60-8fcc-bf165d66a1db] Running
I0914 16:45:48.343079 16836 system_pods.go:74] duration metric: took 185.884628ms to wait for pod list to return data ...
I0914 16:45:48.343091 16836 default_sa.go:34] waiting for default service account to be created ...
I0914 16:45:48.434435 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:48.443153 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:48.537917 16836 default_sa.go:45] found service account: "default"
I0914 16:45:48.537940 16836 default_sa.go:55] duration metric: took 194.839891ms for default service account to be created ...
I0914 16:45:48.537948 16836 system_pods.go:116] waiting for k8s-apps to be running ...
I0914 16:45:48.639557 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:48.743172 16836 system_pods.go:86] 18 kube-system pods found
I0914 16:45:48.743202 16836 system_pods.go:89] "coredns-7c65d6cfc9-fhkpr" [ed430295-b8bb-4b63-879b-41ac0ffc425f] Running
I0914 16:45:48.743215 16836 system_pods.go:89] "csi-hostpath-attacher-0" [05fd59ce-bf15-4ac8-a8ba-ba33283ce18e] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0914 16:45:48.743225 16836 system_pods.go:89] "csi-hostpath-resizer-0" [88020b83-2dc2-4f3b-849c-313753ff9e30] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0914 16:45:48.743239 16836 system_pods.go:89] "csi-hostpathplugin-8rbhp" [68be09a0-981d-4c2a-a863-b68f786ef120] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0914 16:45:48.743248 16836 system_pods.go:89] "etcd-addons-657043" [79535ace-a736-4d42-a433-2c62172065fd] Running
I0914 16:45:48.743253 16836 system_pods.go:89] "kube-apiserver-addons-657043" [87a2c6e8-023b-4f28-9658-8f695938ef56] Running
I0914 16:45:48.743258 16836 system_pods.go:89] "kube-controller-manager-addons-657043" [e706191e-83a3-41b8-97d4-e7023ace20b7] Running
I0914 16:45:48.743264 16836 system_pods.go:89] "kube-ingress-dns-minikube" [5368b1a1-3e3e-4604-9d87-1f7cb6b7deb2] Running
I0914 16:45:48.743270 16836 system_pods.go:89] "kube-proxy-k2qjf" [0bee5060-fe14-4a2b-82ed-f1010c5ec25a] Running
I0914 16:45:48.743274 16836 system_pods.go:89] "kube-scheduler-addons-657043" [8b0a549e-3439-46c4-bdeb-f5cbb5c57bf4] Running
I0914 16:45:48.743282 16836 system_pods.go:89] "metrics-server-84c5f94fbc-brs9v" [58c5eb21-47b4-4c60-8417-5d5a5e6a66dd] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0914 16:45:48.743285 16836 system_pods.go:89] "nvidia-device-plugin-daemonset-8wvj8" [2f25db8f-f3dd-43be-ae80-0ec1d0689dc4] Running
I0914 16:45:48.743292 16836 system_pods.go:89] "registry-66c9cd494c-9jnkd" [fe7b5898-ffa3-4b87-b941-4220b03798f0] Running
I0914 16:45:48.743299 16836 system_pods.go:89] "registry-proxy-d2r6l" [5b832366-d825-43bd-8e89-8b85bdb2fda4] Pending / Ready:ContainersNotReady (containers with unready status: [registry-proxy]) / ContainersReady:ContainersNotReady (containers with unready status: [registry-proxy])
I0914 16:45:48.743311 16836 system_pods.go:89] "snapshot-controller-56fcc65765-5b7lc" [605775e7-c176-4086-9177-20d29e1d3408] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0914 16:45:48.743325 16836 system_pods.go:89] "snapshot-controller-56fcc65765-8rgp8" [538a76dd-782a-4277-ba29-311568404eee] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0914 16:45:48.743334 16836 system_pods.go:89] "storage-provisioner" [7e97288c-a120-43a5-a546-435fb0269a16] Running
I0914 16:45:48.743339 16836 system_pods.go:89] "tiller-deploy-b48cc5f79-rlg46" [93dc59e3-8af8-4f60-8fcc-bf165d66a1db] Running
I0914 16:45:48.743347 16836 system_pods.go:126] duration metric: took 205.393821ms to wait for k8s-apps to be running ...
I0914 16:45:48.743356 16836 system_svc.go:44] waiting for kubelet service to be running ....
I0914 16:45:48.743406 16836 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0914 16:45:48.754755 16836 system_svc.go:56] duration metric: took 11.391017ms WaitForService to wait for kubelet
I0914 16:45:48.754781 16836 kubeadm.go:582] duration metric: took 41.756363814s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0914 16:45:48.754799 16836 node_conditions.go:102] verifying NodePressure condition ...
I0914 16:45:48.934055 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:48.938427 16836 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0914 16:45:48.938469 16836 node_conditions.go:123] node cpu capacity is 8
I0914 16:45:48.938480 16836 node_conditions.go:105] duration metric: took 183.677555ms to run NodePressure ...
I0914 16:45:48.938493 16836 start.go:241] waiting for startup goroutines ...
I0914 16:45:48.944017 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:49.139848 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:49.435241 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:49.443279 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:49.639866 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:49.934563 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:49.943563 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:50.140408 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:50.434433 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:50.443452 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:50.639610 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:50.934375 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:50.943387 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:51.138977 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:51.435472 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:51.443826 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:51.640831 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:51.934973 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:51.943171 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:52.140109 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:52.434760 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:52.444002 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:52.640239 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:52.935457 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:52.943888 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:53.139901 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:53.435763 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:53.443607 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:53.639743 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:53.934341 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:53.943369 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:54.142972 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:54.435137 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:54.442966 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:54.640679 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:54.934760 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0914 16:45:54.944112 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:55.139805 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:55.435337 16836 kapi.go:107] duration metric: took 35.004195296s to wait for kubernetes.io/minikube-addons=registry ...
I0914 16:45:55.443866 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:55.639348 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:55.943166 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:56.139505 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:56.443310 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:56.640827 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:56.944171 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:57.139987 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:57.445573 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:57.640437 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:57.944339 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:58.140005 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:58.444166 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:58.639859 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:58.943870 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:59.139635 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:59.443996 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:45:59.639591 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:45:59.943253 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:00.140208 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:00.443216 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:00.640734 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:00.943695 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:01.139085 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:01.446444 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:01.639160 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:01.943852 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:02.140214 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:02.444103 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:02.639896 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:02.943736 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:03.140871 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:03.540723 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:03.639902 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:03.943438 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:04.140251 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:04.443833 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:04.716292 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:04.946996 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:05.213878 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:05.444350 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:05.643255 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:05.945133 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:06.140516 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:06.443897 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:06.640898 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:06.943845 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:07.139460 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:07.443854 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:07.639287 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:07.943483 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:08.140050 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:08.443553 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:08.639953 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:08.944425 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:09.140160 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:09.443392 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:09.701893 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:09.943691 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:10.139805 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:10.443378 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:10.640172 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:10.943020 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:11.140051 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:11.444160 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:11.640543 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:11.945301 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:12.139740 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:12.443630 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:12.639868 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:12.943603 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:13.139323 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:13.442960 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:13.640456 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:13.944811 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:14.141226 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:14.443655 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:14.639847 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:14.944488 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:15.140654 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:15.443776 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:15.640308 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:15.944033 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:16.140929 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:16.444533 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:16.640643 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:16.943631 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:17.139670 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:17.443628 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:17.640277 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:17.945023 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:18.139721 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:18.443340 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:18.640007 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:18.944636 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:19.140153 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:19.443353 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:19.640954 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:19.943839 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:20.139635 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:20.443516 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:20.640018 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:20.943885 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:21.139602 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:21.444365 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:21.640420 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:21.943811 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:22.141016 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:22.444448 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:22.640601 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:22.945915 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:23.139737 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:23.444423 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:23.640599 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:23.943702 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:24.139886 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:24.442894 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:24.639915 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:24.944309 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:25.139883 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:25.444163 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:25.639784 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:25.943528 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:26.140653 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:26.465413 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:26.639955 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:26.944262 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:27.140730 16836 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0914 16:46:27.444598 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:27.640301 16836 kapi.go:107] duration metric: took 1m10.504742439s to wait for app.kubernetes.io/name=ingress-nginx ...
I0914 16:46:27.972380 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:28.443686 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0914 16:46:28.943433 16836 kapi.go:107] duration metric: took 1m7.504229517s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0914 16:46:45.766034 16836 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0914 16:46:45.766054 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:46.266172 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:46.766278 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:47.266592 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:47.766164 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:48.266148 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:48.765535 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:49.267108 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:49.766364 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:50.266124 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:50.766537 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:51.266744 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:51.766005 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:52.265969 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:52.765749 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:53.265744 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:53.765986 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:54.266095 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:54.767047 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:55.266304 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:55.766435 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:56.266303 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:56.766532 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:57.266849 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:57.766015 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:58.265855 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:58.766392 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:59.266350 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:46:59.766642 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:00.265547 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:00.766858 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:01.266108 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:01.766206 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:02.266605 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:02.765686 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:03.265924 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:03.765566 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:04.267041 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:04.766649 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:05.265634 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:05.765791 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:06.265960 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:06.766088 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:07.266530 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:07.765411 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:08.266742 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:08.765613 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:09.266677 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:09.765562 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:10.266622 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:10.765925 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:11.266265 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:11.766222 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:12.266668 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:12.765435 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:13.266843 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:13.766015 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:14.266230 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:14.766552 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:15.266718 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:15.765805 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:16.265904 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:16.765806 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:17.266351 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:17.766625 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:18.265983 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:18.765976 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:19.266051 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:19.766072 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:20.266703 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:20.765948 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:21.265956 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:21.766404 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:22.266579 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:22.765473 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:23.267026 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:23.766242 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:24.266595 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:24.766109 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:25.266461 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:25.766755 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:26.266154 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:26.766613 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:27.266038 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:27.766124 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:28.266244 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:28.766323 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:29.266119 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:29.814959 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:30.266080 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:30.766421 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:31.266699 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:31.765824 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:32.265813 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:32.765763 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:33.265733 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:33.765822 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:34.266083 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:34.766593 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:35.265568 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:35.766893 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:36.266047 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:36.765849 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:37.266543 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:37.765487 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:38.266618 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:38.766683 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:39.265517 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:39.765450 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:40.266961 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:40.766233 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:41.266615 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:41.766636 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:42.265805 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:42.765594 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:43.265644 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:43.765862 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:44.265755 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:44.766134 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:45.266202 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:45.766491 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:46.265828 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:46.765774 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:47.266307 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:47.765950 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:48.266514 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:48.765820 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:49.265822 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:49.765544 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:50.266873 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:50.766576 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:51.266218 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:51.766380 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:52.266581 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:52.765381 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:53.266410 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:53.766235 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:54.266740 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:54.766768 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:55.265955 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:55.766771 16836 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0914 16:47:56.265913 16836 kapi.go:107] duration metric: took 2m33.003273043s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0914 16:47:56.267859 16836 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-657043 cluster.
I0914 16:47:56.269565 16836 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0914 16:47:56.271358 16836 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0914 16:47:56.273156 16836 out.go:177] * Enabled addons: storage-provisioner, nvidia-device-plugin, volcano, ingress-dns, cloud-spanner, helm-tiller, inspektor-gadget, metrics-server, yakd, default-storageclass, volumesnapshots, registry, ingress, csi-hostpath-driver, gcp-auth
I0914 16:47:56.274620 16836 addons.go:510] duration metric: took 2m49.276147s for enable addons: enabled=[storage-provisioner nvidia-device-plugin volcano ingress-dns cloud-spanner helm-tiller inspektor-gadget metrics-server yakd default-storageclass volumesnapshots registry ingress csi-hostpath-driver gcp-auth]
I0914 16:47:56.274672 16836 start.go:246] waiting for cluster config update ...
I0914 16:47:56.274697 16836 start.go:255] writing updated cluster config ...
I0914 16:47:56.274982 16836 ssh_runner.go:195] Run: rm -f paused
I0914 16:47:56.323377 16836 start.go:600] kubectl: 1.31.0, cluster: 1.31.1 (minor skew: 0)
I0914 16:47:56.325360 16836 out.go:177] * Done! kubectl is now configured to use "addons-657043" cluster and "default" namespace by default
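The gcp-auth messages above mention a `gcp-auth-skip-secret` label for opting a pod out of credential mounting. As a minimal, hypothetical sketch only (the label key comes from the addon output above; the pod name, image, and command are illustrative, not part of this test run), such a pod configuration could look like:

apiVersion: v1
kind: Pod
metadata:
  name: no-gcp-creds-example          # hypothetical name, for illustration only
  labels:
    gcp-auth-skip-secret: "true"      # asks the gcp-auth webhook not to mount GCP credentials
spec:
  containers:
  - name: app
    image: busybox                    # placeholder image
    command: ["sleep", "3600"]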
==> Docker <==
Sep 14 16:57:25 addons-657043 dockerd[1339]: time="2024-09-14T16:57:25.114760287Z" level=info msg="ignoring event" container=b638226c409fd1477700a939db46751f5c7e3b6aa2e37942d0f27d648472ffbc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:26 addons-657043 dockerd[1339]: time="2024-09-14T16:57:26.828715167Z" level=info msg="ignoring event" container=775e41abe76aa986a47b37048f113c9a370c7fb67734a67ab0b33858b15e841f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.012784201Z" level=info msg="ignoring event" container=d2818dbe1e4dce454aa9484b94955702be6fd55f878d7fb035f66de610a9e1d4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.014125289Z" level=info msg="ignoring event" container=0d6e33ff2457eb73387cd40ad153613d1269d2a0b27446e9c486799835723a01 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.015470450Z" level=info msg="ignoring event" container=8ab5baa5e1dea7dc5a9a92a7645bea66dd1ba1d832290ef2f037f9237e5fede5 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.019681886Z" level=info msg="ignoring event" container=e90e36a985934a2b7b03bc9059c96ec06a8100b5c26e864606a0bd917bdb5f55 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.020402698Z" level=info msg="ignoring event" container=aed081743143f1edb767244e48a89050be8eefbb03c807e5d8db3f2ba7be871e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.022250024Z" level=info msg="ignoring event" container=8b8ff841d6b21a2f279e2d51acb1b60c55debba7b9cb50272d870df4bcdcbc35 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.113714479Z" level=info msg="ignoring event" container=53a8b48dc4e27bb3e74c190bef869da870d84491bf9756d0b76f6c2b89fe38ac module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.278135247Z" level=info msg="ignoring event" container=2cc95b805a2137424223199fabfe85e61bcdc438fc85ef4e3ed404dd3a216ebe module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.281513676Z" level=info msg="ignoring event" container=280ced824be7864eb0dc3bc9d69367a6b87921f2af2c5658378faabe4adf8afc module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:27 addons-657043 dockerd[1339]: time="2024-09-14T16:57:27.325023692Z" level=info msg="ignoring event" container=25dc3804249799d8d792a34b7e546c8f56c601eac3f7c22db5dae6cf4d7b10a6 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:30 addons-657043 dockerd[1339]: time="2024-09-14T16:57:30.865647345Z" level=info msg="Attempting next endpoint for pull after error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed"
Sep 14 16:57:30 addons-657043 dockerd[1339]: time="2024-09-14T16:57:30.867784178Z" level=error msg="Handler for POST /v1.43/images/create returned error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed"
Sep 14 16:57:33 addons-657043 dockerd[1339]: time="2024-09-14T16:57:33.249617456Z" level=info msg="ignoring event" container=a3d00b5dc1f2594193e0995b20b1e50223a62c1d28800268d2e671edf24c8ac7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:33 addons-657043 dockerd[1339]: time="2024-09-14T16:57:33.254675366Z" level=info msg="ignoring event" container=3380ffb20d37ebda74f034913bc384aba45d6f04293d930455880d6eb6ab4ba2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:33 addons-657043 dockerd[1339]: time="2024-09-14T16:57:33.427997766Z" level=info msg="ignoring event" container=72b035856b17993693870fcd033c2364f31e31a8b1be01250b31e68c6d34c27b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:33 addons-657043 dockerd[1339]: time="2024-09-14T16:57:33.464282965Z" level=info msg="ignoring event" container=c01323d8b75544f1cf92f3606cccc9fa3637916eac28118f0e96fed0ce16e3a7 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:34 addons-657043 cri-dockerd[1604]: time="2024-09-14T16:57:34Z" level=error msg="error getting RW layer size for container ID '8b8ff841d6b21a2f279e2d51acb1b60c55debba7b9cb50272d870df4bcdcbc35': Error response from daemon: No such container: 8b8ff841d6b21a2f279e2d51acb1b60c55debba7b9cb50272d870df4bcdcbc35"
Sep 14 16:57:34 addons-657043 cri-dockerd[1604]: time="2024-09-14T16:57:34Z" level=error msg="Set backoffDuration to : 1m0s for container ID '8b8ff841d6b21a2f279e2d51acb1b60c55debba7b9cb50272d870df4bcdcbc35'"
Sep 14 16:57:50 addons-657043 dockerd[1339]: time="2024-09-14T16:57:50.701162053Z" level=info msg="ignoring event" container=e2697c03cbca9a81e8a857460892da2cd8b0071bbcf95d632e5b643f6e3c5c8b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:51 addons-657043 dockerd[1339]: time="2024-09-14T16:57:51.222311137Z" level=info msg="ignoring event" container=a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:51 addons-657043 dockerd[1339]: time="2024-09-14T16:57:51.283472447Z" level=info msg="ignoring event" container=760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:51 addons-657043 dockerd[1339]: time="2024-09-14T16:57:51.370913045Z" level=info msg="ignoring event" container=cb280d5a1f419ff4eb7dd528b290b54d19b87b9a3269b65b12903e1b1c4b2f13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 14 16:57:51 addons-657043 dockerd[1339]: time="2024-09-14T16:57:51.466573283Z" level=info msg="ignoring event" container=dc586162cf90841b10b70da611698c2f89d7a80dae6c9b33565b9126b579a5b4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
d7e7de8c87064 kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 31 seconds ago Running hello-world-app 0 f1a5aedd1930a hello-world-app-55bf9c44b4-cw7tn
f82d28678211c nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 41 seconds ago Running nginx 0 4920b7a03966f nginx
95cf0166088e5 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 00ff3f2e010a5 gcp-auth-89d5ffd79-bnpnj
de7fc5b5d304d ce263a8653f9c 11 minutes ago Exited patch 1 e1a41cf14d22a ingress-nginx-admission-patch-kvc4p
a5454bbf6c0ac registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 95ac1242879de ingress-nginx-admission-create-k9pp8
760726f773928 gcr.io/k8s-minikube/kube-registry-proxy@sha256:b3fa0b2df8737fdb85ad5918a7e2652527463e357afff83a5e5bb966bcedc367 11 minutes ago Exited registry-proxy 0 dc586162cf908 registry-proxy-d2r6l
9d324d1b07e0c rancher/local-path-provisioner@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246 12 minutes ago Running local-path-provisioner 0 87c2c503cb816 local-path-provisioner-86d989889c-zvjcl
a4b1db06a7155 registry@sha256:ac0192b549007e22998eb74e8d8488dcfe70f1489520c3b144a6047ac5efbe90 12 minutes ago Exited registry 0 cb280d5a1f419 registry-66c9cd494c-9jnkd
33c500f1c400e 6e38f40d628db 12 minutes ago Running storage-provisioner 0 48e0886e49a71 storage-provisioner
684f1493f8067 c69fa2e9cbf5f 12 minutes ago Running coredns 0 3c68b385d9cd1 coredns-7c65d6cfc9-fhkpr
ba0a5dfb23a3d 60c005f310ff3 12 minutes ago Running kube-proxy 0 02c4514db5c90 kube-proxy-k2qjf
160e3b6ff7cf4 6bab7719df100 12 minutes ago Running kube-apiserver 0 ca6ebcae6be67 kube-apiserver-addons-657043
106d1ca8bab77 2e96e5913fc06 12 minutes ago Running etcd 0 8e1e78b4dc8b9 etcd-addons-657043
da7731688ce7f 9aa1fad941575 12 minutes ago Running kube-scheduler 0 936fec7c8effc kube-scheduler-addons-657043
7ddbe06a7c28b 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 1a382f5afa3d2 kube-controller-manager-addons-657043
==> coredns [684f1493f806] <==
[INFO] 10.244.0.22:44608 - 49498 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.004623724s
[INFO] 10.244.0.22:59513 - 16845 "A IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.004598998s
[INFO] 10.244.0.22:36541 - 58725 "A IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.004740544s
[INFO] 10.244.0.22:44608 - 50004 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003807318s
[INFO] 10.244.0.22:36098 - 26892 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004111857s
[INFO] 10.244.0.22:36541 - 53007 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.066829842s
[INFO] 10.244.0.22:36098 - 19111 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.064974526s
[INFO] 10.244.0.22:37163 - 23571 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.069160826s
[INFO] 10.244.0.22:59513 - 35055 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.06716469s
[INFO] 10.244.0.22:57173 - 43424 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.069397157s
[INFO] 10.244.0.22:44608 - 16510 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.065253903s
[INFO] 10.244.0.22:35760 - 9981 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.068670716s
[INFO] 10.244.0.22:36320 - 29045 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.069668836s
[INFO] 10.244.0.22:37163 - 61864 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000082415s
[INFO] 10.244.0.22:36098 - 59847 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000060727s
[INFO] 10.244.0.22:36320 - 32509 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00008201s
[INFO] 10.244.0.22:35760 - 22774 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000073296s
[INFO] 10.244.0.22:44608 - 65176 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000050883s
[INFO] 10.244.0.22:57173 - 63748 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000066118s
[INFO] 10.244.0.22:36541 - 34939 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005520443s
[INFO] 10.244.0.22:59513 - 28722 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005161943s
[INFO] 10.244.0.22:36541 - 38526 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005338491s
[INFO] 10.244.0.22:36541 - 49128 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000100182s
[INFO] 10.244.0.22:59513 - 34063 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.003248081s
[INFO] 10.244.0.22:59513 - 19450 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000091863s
==> describe nodes <==
Name: addons-657043
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-657043
kubernetes.io/os=linux
minikube.k8s.io/commit=fbeeb744274463b05401c917e5ab21bbaf5ef95a
minikube.k8s.io/name=addons-657043
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_14T16_45_03_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-657043
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 14 Sep 2024 16:45:00 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-657043
AcquireTime: <unset>
RenewTime: Sat, 14 Sep 2024 16:57:47 +0000
Conditions:
Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
----             ------  -----------------                 ------------------                ------                       -------
MemoryPressure   False   Sat, 14 Sep 2024 16:57:39 +0000   Sat, 14 Sep 2024 16:44:58 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
DiskPressure     False   Sat, 14 Sep 2024 16:57:39 +0000   Sat, 14 Sep 2024 16:44:58 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
PIDPressure      False   Sat, 14 Sep 2024 16:57:39 +0000   Sat, 14 Sep 2024 16:44:58 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
Ready            True    Sat, 14 Sep 2024 16:57:39 +0000   Sat, 14 Sep 2024 16:45:00 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-657043
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859316Ki
pods: 110
System Info:
Machine ID: 0af4c1bb5fd34da7ab104186f76004ca
System UUID: e1920e15-6d0e-439e-9dce-c03d5445b400
Boot ID: 09ab8332-6c06-47ad-962c-5ddacc36dde9
Kernel Version: 5.15.0-1068-gcp
OS Image: Ubuntu 22.04.4 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.2.1
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (12 in total)
Namespace            Name                                      CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
---------            ----                                      ------------  ----------  ---------------  -------------  ---
default              busybox                                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         9m15s
default              hello-world-app-55bf9c44b4-cw7tn          0 (0%)        0 (0%)      0 (0%)           0 (0%)         34s
default              nginx                                     0 (0%)        0 (0%)      0 (0%)           0 (0%)         44s
gcp-auth             gcp-auth-89d5ffd79-bnpnj                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         11m
kube-system          coredns-7c65d6cfc9-fhkpr                  100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     12m
kube-system          etcd-addons-657043                        100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         12m
kube-system          kube-apiserver-addons-657043              250m (3%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system          kube-controller-manager-addons-657043     200m (2%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system          kube-proxy-k2qjf                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
kube-system          kube-scheduler-addons-657043              100m (1%)     0 (0%)      0 (0%)           0 (0%)         12m
kube-system          storage-provisioner                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
local-path-storage   local-path-provisioner-86d989889c-zvjcl   0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource           Requests    Limits
--------           --------    ------
cpu                750m (9%)   0 (0%)
memory             170Mi (0%)  170Mi (0%)
ephemeral-storage  0 (0%)      0 (0%)
hugepages-1Gi      0 (0%)      0 (0%)
hugepages-2Mi      0 (0%)      0 (0%)
Events:
Type     Reason                   Age                From             Message
----     ------                   ----               ----             -------
Normal   Starting                 12m                kube-proxy
Normal   NodeAllocatableEnforced  12m                kubelet          Updated Node Allocatable limit across pods
Warning  CgroupV1                 12m                kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal   NodeHasSufficientMemory  12m (x8 over 12m)  kubelet          Node addons-657043 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    12m (x7 over 12m)  kubelet          Node addons-657043 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     12m (x7 over 12m)  kubelet          Node addons-657043 status is now: NodeHasSufficientPID
Normal   Starting                 12m                kubelet          Starting kubelet.
Normal   Starting                 12m                kubelet          Starting kubelet.
Warning  CgroupV1                 12m                kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
Normal   NodeAllocatableEnforced  12m                kubelet          Updated Node Allocatable limit across pods
Normal   NodeHasSufficientMemory  12m                kubelet          Node addons-657043 status is now: NodeHasSufficientMemory
Normal   NodeHasNoDiskPressure    12m                kubelet          Node addons-657043 status is now: NodeHasNoDiskPressure
Normal   NodeHasSufficientPID     12m                kubelet          Node addons-657043 status is now: NodeHasSufficientPID
Normal   RegisteredNode           12m                node-controller  Node addons-657043 event: Registered Node addons-657043 in Controller
==> dmesg <==
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 5e aa 9e f3 08 41 08 06
[ +2.547066] IPv4: martian source 10.244.0.1 from 10.244.0.18, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff d6 2a 85 35 af 65 08 06
[ +5.987416] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff ba c7 62 99 33 b4 08 06
[ +0.256590] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 72 96 98 47 93 42 08 06
[ +0.224441] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 5e c2 14 67 50 7b 08 06
[ +6.023499] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 6e 23 e3 dc ca 50 08 06
[Sep14 16:47] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 9e 3d 19 bf b6 72 08 06
[ +0.028662] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 1a 52 b6 9c 89 b3 08 06
[ +28.421273] IPv4: martian source 10.244.0.1 from 10.244.0.26, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff d6 3f 28 c5 4e f9 08 06
[ +0.000458] IPv4: martian source 10.244.0.26 from 10.244.0.3, on dev eth0
[ +0.000003] ll header: 00000000: ff ff ff ff ff ff 66 10 16 b1 99 3c 08 06
[Sep14 16:56] IPv4: martian source 10.244.0.1 from 10.244.0.35, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 52 86 5c b2 52 e1 08 06
[Sep14 16:57] IPv4: martian source 10.244.0.36 from 10.244.0.22, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 6e 23 e3 dc ca 50 08 06
[ +1.720265] IPv4: martian source 10.244.0.22 from 10.244.0.3, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 66 10 16 b1 99 3c 08 06
==> etcd [106d1ca8bab7] <==
{"level":"info","ts":"2024-09-14T16:44:59.241310Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-14T16:44:59.241903Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-657043 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-14T16:44:59.241933Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-14T16:44:59.241939Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-14T16:44:59.242156Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-14T16:44:59.242180Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-14T16:44:59.242263Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-14T16:44:59.242356Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-14T16:44:59.242398Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-14T16:44:59.242956Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-14T16:44:59.242976Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-14T16:44:59.243917Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-09-14T16:44:59.243943Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-14T16:45:16.720383Z","caller":"traceutil/trace.go:171","msg":"trace[1430260124] transaction","detail":"{read_only:false; response_revision:678; number_of_response:1; }","duration":"102.936878ms","start":"2024-09-14T16:45:16.617419Z","end":"2024-09-14T16:45:16.720356Z","steps":["trace[1430260124] 'process raft request' (duration: 102.518666ms)"],"step_count":1}
{"level":"warn","ts":"2024-09-14T16:45:27.069935Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"136.661513ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-09-14T16:45:27.070006Z","caller":"traceutil/trace.go:171","msg":"trace[94094035] range","detail":"{range_begin:/registry/pods; range_end:; response_count:0; response_revision:1005; }","duration":"136.76389ms","start":"2024-09-14T16:45:26.933229Z","end":"2024-09-14T16:45:27.069993Z","steps":["trace[94094035] 'range keys from in-memory index tree' (duration: 136.604301ms)"],"step_count":1}
{"level":"info","ts":"2024-09-14T16:45:27.081274Z","caller":"traceutil/trace.go:171","msg":"trace[1113925949] linearizableReadLoop","detail":"{readStateIndex:1027; appliedIndex:1026; }","duration":"139.892605ms","start":"2024-09-14T16:45:26.941365Z","end":"2024-09-14T16:45:27.081257Z","steps":["trace[1113925949] 'read index received' (duration: 66.330085ms)","trace[1113925949] 'applied index is now lower than readState.Index' (duration: 73.561377ms)"],"step_count":2}
{"level":"warn","ts":"2024-09-14T16:45:27.081378Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"139.994998ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-09-14T16:45:27.081401Z","caller":"traceutil/trace.go:171","msg":"trace[1787578825] range","detail":"{range_begin:/registry/pods; range_end:; response_count:0; response_revision:1005; }","duration":"140.038338ms","start":"2024-09-14T16:45:26.941357Z","end":"2024-09-14T16:45:27.081396Z","steps":["trace[1787578825] 'agreement among raft nodes before linearized reading' (duration: 139.972806ms)"],"step_count":1}
{"level":"info","ts":"2024-09-14T16:45:54.418024Z","caller":"traceutil/trace.go:171","msg":"trace[1344731661] transaction","detail":"{read_only:false; response_revision:1097; number_of_response:1; }","duration":"130.691387ms","start":"2024-09-14T16:45:54.287314Z","end":"2024-09-14T16:45:54.418005Z","steps":["trace[1344731661] 'process raft request' (duration: 130.540304ms)"],"step_count":1}
{"level":"warn","ts":"2024-09-14T16:45:59.626087Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"111.530672ms","expected-duration":"100ms","prefix":"read-only range ","request":"limit:1 serializable:true keys_only:true ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-09-14T16:45:59.626154Z","caller":"traceutil/trace.go:171","msg":"trace[1333274046] range","detail":"{range_begin:; range_end:; response_count:0; response_revision:1120; }","duration":"111.617041ms","start":"2024-09-14T16:45:59.514524Z","end":"2024-09-14T16:45:59.626141Z","steps":["trace[1333274046] 'range keys from in-memory index tree' (duration: 111.513497ms)"],"step_count":1}
{"level":"info","ts":"2024-09-14T16:54:59.260796Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1919}
{"level":"info","ts":"2024-09-14T16:54:59.285676Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1919,"took":"24.328646ms","hash":3528032701,"current-db-size-bytes":9310208,"current-db-size":"9.3 MB","current-db-size-in-use-bytes":5070848,"current-db-size-in-use":"5.1 MB"}
{"level":"info","ts":"2024-09-14T16:54:59.285719Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":3528032701,"revision":1919,"compact-revision":-1}
==> gcp-auth [95cf0166088e] <==
2024/09/14 16:48:37 Ready to write response ...
2024/09/14 16:56:39 Ready to marshal response ...
2024/09/14 16:56:39 Ready to write response ...
2024/09/14 16:56:39 Ready to marshal response ...
2024/09/14 16:56:39 Ready to write response ...
2024/09/14 16:56:50 Ready to marshal response ...
2024/09/14 16:56:50 Ready to write response ...
2024/09/14 16:56:50 Ready to marshal response ...
2024/09/14 16:56:50 Ready to write response ...
2024/09/14 16:56:51 Ready to marshal response ...
2024/09/14 16:56:51 Ready to write response ...
2024/09/14 16:56:51 Ready to marshal response ...
2024/09/14 16:56:51 Ready to write response ...
2024/09/14 16:56:51 Ready to marshal response ...
2024/09/14 16:56:51 Ready to write response ...
2024/09/14 16:56:51 Ready to marshal response ...
2024/09/14 16:56:51 Ready to write response ...
2024/09/14 16:56:55 Ready to marshal response ...
2024/09/14 16:56:55 Ready to write response ...
2024/09/14 16:57:08 Ready to marshal response ...
2024/09/14 16:57:08 Ready to write response ...
2024/09/14 16:57:16 Ready to marshal response ...
2024/09/14 16:57:16 Ready to write response ...
2024/09/14 16:57:18 Ready to marshal response ...
2024/09/14 16:57:18 Ready to write response ...
==> kernel <==
16:57:52 up 40 min, 0 users, load average: 1.00, 0.69, 0.63
Linux addons-657043 5.15.0-1068-gcp #76~20.04.1-Ubuntu SMP Tue Aug 20 15:52:45 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.4 LTS"
==> kube-apiserver [160e3b6ff7cf] <==
W0914 16:48:28.922649 1 cacher.go:171] Terminating all watchers from cacher numatopologies.nodeinfo.volcano.sh
W0914 16:48:28.942819 1 cacher.go:171] Terminating all watchers from cacher queues.scheduling.volcano.sh
W0914 16:48:29.132108 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0914 16:48:29.518363 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0914 16:48:29.839290 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0914 16:56:51.324360 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.98.188.37"}
I0914 16:57:03.807644 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
I0914 16:57:08.272212 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0914 16:57:08.434402 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.111.134.60"}
I0914 16:57:12.874878 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0914 16:57:13.893504 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0914 16:57:18.923284 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.107.59.248"}
I0914 16:57:33.113158 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0914 16:57:33.113209 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0914 16:57:33.125860 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0914 16:57:33.125912 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0914 16:57:33.138054 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0914 16:57:33.138109 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0914 16:57:33.151636 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0914 16:57:33.151681 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0914 16:57:33.228799 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0914 16:57:33.228840 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
W0914 16:57:34.152693 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0914 16:57:34.229723 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0914 16:57:34.261965 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
==> kube-controller-manager [7ddbe06a7c28] <==
E0914 16:57:37.040887 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0914 16:57:37.530181 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0914 16:57:37.530223 1 shared_informer.go:320] Caches are synced for resource quota
W0914 16:57:37.852621 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:37.852668 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0914 16:57:37.917379 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0914 16:57:37.917434 1 shared_informer.go:320] Caches are synced for garbage collector
W0914 16:57:38.045188 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:38.045236 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0914 16:57:39.504748 1 range_allocator.go:241] "Successfully synced" logger="node-ipam-controller" key="addons-657043"
W0914 16:57:42.046339 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:42.046387 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0914 16:57:42.262475 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:42.262514 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0914 16:57:42.447218 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:42.447264 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0914 16:57:42.495229 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:42.495275 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0914 16:57:42.641347 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:42.641386 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0914 16:57:50.659891 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:50.659945 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0914 16:57:51.135629 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="14.664µs"
W0914 16:57:51.893507 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0914 16:57:51.893548 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
==> kube-proxy [ba0a5dfb23a3] <==
I0914 16:45:11.031727 1 server_linux.go:66] "Using iptables proxy"
I0914 16:45:11.523373 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0914 16:45:11.523472 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0914 16:45:11.932489 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0914 16:45:11.932551 1 server_linux.go:169] "Using iptables Proxier"
I0914 16:45:12.021107 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0914 16:45:12.021699 1 server.go:483] "Version info" version="v1.31.1"
I0914 16:45:12.021730 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0914 16:45:12.030866 1 config.go:199] "Starting service config controller"
I0914 16:45:12.030891 1 shared_informer.go:313] Waiting for caches to sync for service config
I0914 16:45:12.030919 1 config.go:105] "Starting endpoint slice config controller"
I0914 16:45:12.030925 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0914 16:45:12.031541 1 config.go:328] "Starting node config controller"
I0914 16:45:12.031557 1 shared_informer.go:313] Waiting for caches to sync for node config
I0914 16:45:12.136137 1 shared_informer.go:320] Caches are synced for node config
I0914 16:45:12.136175 1 shared_informer.go:320] Caches are synced for service config
I0914 16:45:12.136213 1 shared_informer.go:320] Caches are synced for endpoint slice config
==> kube-scheduler [da7731688ce7] <==
W0914 16:45:00.242774 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0914 16:45:00.242787 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0914 16:45:00.242789 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0914 16:45:00.242824 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0914 16:45:00.314019 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope
E0914 16:45:00.314072 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.056925 1 reflector.go:561] runtime/asm_amd64.s:1695: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system"
E0914 16:45:01.056963 1 reflector.go:158] "Unhandled Error" err="runtime/asm_amd64.s:1695: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError"
W0914 16:45:01.096726 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0914 16:45:01.096766 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.145714 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0914 16:45:01.145753 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.315887 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0914 16:45:01.315928 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.353581 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0914 16:45:01.353626 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.375955 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope
E0914 16:45:01.375996 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.385284 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope
E0914 16:45:01.385324 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.449035 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope
E0914 16:45:01.449072 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0914 16:45:01.465419 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
E0914 16:45:01.465457 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
I0914 16:45:03.039830 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 14 16:57:34 addons-657043 kubelet[2445]: I0914 16:57:34.082457 2445 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"3380ffb20d37ebda74f034913bc384aba45d6f04293d930455880d6eb6ab4ba2"} err="failed to get container status \"3380ffb20d37ebda74f034913bc384aba45d6f04293d930455880d6eb6ab4ba2\": rpc error: code = Unknown desc = Error response from daemon: No such container: 3380ffb20d37ebda74f034913bc384aba45d6f04293d930455880d6eb6ab4ba2"
Sep 14 16:57:34 addons-657043 kubelet[2445]: I0914 16:57:34.733727 2445 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="538a76dd-782a-4277-ba29-311568404eee" path="/var/lib/kubelet/pods/538a76dd-782a-4277-ba29-311568404eee/volumes"
Sep 14 16:57:34 addons-657043 kubelet[2445]: I0914 16:57:34.734072 2445 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="605775e7-c176-4086-9177-20d29e1d3408" path="/var/lib/kubelet/pods/605775e7-c176-4086-9177-20d29e1d3408/volumes"
Sep 14 16:57:41 addons-657043 kubelet[2445]: E0914 16:57:41.726121 2445 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-test\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox\\\"\"" pod="default/registry-test" podUID="bc76eae9-820b-424d-aa8e-b7cb24a5fa90"
Sep 14 16:57:42 addons-657043 kubelet[2445]: E0914 16:57:42.726658 2445 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="563fdef6-8639-4dd6-ae2d-776c266b720b"
Sep 14 16:57:50 addons-657043 kubelet[2445]: I0914 16:57:50.774413 2445 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kkb5\" (UniqueName: \"kubernetes.io/projected/bc76eae9-820b-424d-aa8e-b7cb24a5fa90-kube-api-access-2kkb5\") pod \"bc76eae9-820b-424d-aa8e-b7cb24a5fa90\" (UID: \"bc76eae9-820b-424d-aa8e-b7cb24a5fa90\") "
Sep 14 16:57:50 addons-657043 kubelet[2445]: I0914 16:57:50.774480 2445 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/bc76eae9-820b-424d-aa8e-b7cb24a5fa90-gcp-creds\") pod \"bc76eae9-820b-424d-aa8e-b7cb24a5fa90\" (UID: \"bc76eae9-820b-424d-aa8e-b7cb24a5fa90\") "
Sep 14 16:57:50 addons-657043 kubelet[2445]: I0914 16:57:50.774559 2445 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bc76eae9-820b-424d-aa8e-b7cb24a5fa90-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "bc76eae9-820b-424d-aa8e-b7cb24a5fa90" (UID: "bc76eae9-820b-424d-aa8e-b7cb24a5fa90"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 14 16:57:50 addons-657043 kubelet[2445]: I0914 16:57:50.776417 2445 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc76eae9-820b-424d-aa8e-b7cb24a5fa90-kube-api-access-2kkb5" (OuterVolumeSpecName: "kube-api-access-2kkb5") pod "bc76eae9-820b-424d-aa8e-b7cb24a5fa90" (UID: "bc76eae9-820b-424d-aa8e-b7cb24a5fa90"). InnerVolumeSpecName "kube-api-access-2kkb5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 14 16:57:50 addons-657043 kubelet[2445]: I0914 16:57:50.874700 2445 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-2kkb5\" (UniqueName: \"kubernetes.io/projected/bc76eae9-820b-424d-aa8e-b7cb24a5fa90-kube-api-access-2kkb5\") on node \"addons-657043\" DevicePath \"\""
Sep 14 16:57:50 addons-657043 kubelet[2445]: I0914 16:57:50.874736 2445 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/bc76eae9-820b-424d-aa8e-b7cb24a5fa90-gcp-creds\") on node \"addons-657043\" DevicePath \"\""
Sep 14 16:57:51 addons-657043 kubelet[2445]: I0914 16:57:51.514895 2445 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2md8\" (UniqueName: \"kubernetes.io/projected/fe7b5898-ffa3-4b87-b941-4220b03798f0-kube-api-access-f2md8\") pod \"fe7b5898-ffa3-4b87-b941-4220b03798f0\" (UID: \"fe7b5898-ffa3-4b87-b941-4220b03798f0\") "
Sep 14 16:57:51 addons-657043 kubelet[2445]: I0914 16:57:51.518711 2445 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe7b5898-ffa3-4b87-b941-4220b03798f0-kube-api-access-f2md8" (OuterVolumeSpecName: "kube-api-access-f2md8") pod "fe7b5898-ffa3-4b87-b941-4220b03798f0" (UID: "fe7b5898-ffa3-4b87-b941-4220b03798f0"). InnerVolumeSpecName "kube-api-access-f2md8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 14 16:57:51 addons-657043 kubelet[2445]: I0914 16:57:51.615424 2445 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qt94\" (UniqueName: \"kubernetes.io/projected/5b832366-d825-43bd-8e89-8b85bdb2fda4-kube-api-access-8qt94\") pod \"5b832366-d825-43bd-8e89-8b85bdb2fda4\" (UID: \"5b832366-d825-43bd-8e89-8b85bdb2fda4\") "
Sep 14 16:57:51 addons-657043 kubelet[2445]: I0914 16:57:51.615496 2445 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-f2md8\" (UniqueName: \"kubernetes.io/projected/fe7b5898-ffa3-4b87-b941-4220b03798f0-kube-api-access-f2md8\") on node \"addons-657043\" DevicePath \"\""
Sep 14 16:57:51 addons-657043 kubelet[2445]: I0914 16:57:51.618078 2445 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b832366-d825-43bd-8e89-8b85bdb2fda4-kube-api-access-8qt94" (OuterVolumeSpecName: "kube-api-access-8qt94") pod "5b832366-d825-43bd-8e89-8b85bdb2fda4" (UID: "5b832366-d825-43bd-8e89-8b85bdb2fda4"). InnerVolumeSpecName "kube-api-access-8qt94". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 14 16:57:51 addons-657043 kubelet[2445]: I0914 16:57:51.716572 2445 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-8qt94\" (UniqueName: \"kubernetes.io/projected/5b832366-d825-43bd-8e89-8b85bdb2fda4-kube-api-access-8qt94\") on node \"addons-657043\" DevicePath \"\""
Sep 14 16:57:52 addons-657043 kubelet[2445]: I0914 16:57:52.247212 2445 scope.go:117] "RemoveContainer" containerID="a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81"
Sep 14 16:57:52 addons-657043 kubelet[2445]: I0914 16:57:52.268448 2445 scope.go:117] "RemoveContainer" containerID="a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81"
Sep 14 16:57:52 addons-657043 kubelet[2445]: E0914 16:57:52.269399 2445 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81" containerID="a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81"
Sep 14 16:57:52 addons-657043 kubelet[2445]: I0914 16:57:52.269450 2445 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81"} err="failed to get container status \"a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81\": rpc error: code = Unknown desc = Error response from daemon: No such container: a4b1db06a7155a537851755e5a7bed7f56c43eba134df8aff6ba8128ab076a81"
Sep 14 16:57:52 addons-657043 kubelet[2445]: I0914 16:57:52.269479 2445 scope.go:117] "RemoveContainer" containerID="760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658"
Sep 14 16:57:52 addons-657043 kubelet[2445]: I0914 16:57:52.316777 2445 scope.go:117] "RemoveContainer" containerID="760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658"
Sep 14 16:57:52 addons-657043 kubelet[2445]: E0914 16:57:52.317772 2445 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658" containerID="760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658"
Sep 14 16:57:52 addons-657043 kubelet[2445]: I0914 16:57:52.317818 2445 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658"} err="failed to get container status \"760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658\": rpc error: code = Unknown desc = Error response from daemon: No such container: 760726f77392896a2a5fa5fdb10420838dd0c8ebea108f0582727f5395221658"
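Editor's note: the kubelet section contains no errors from the registry addon itself; the only failures are the ImagePullBackOff entries for default/registry-test and default/busybox, so the registry-test container never started at all. The pull can be retried by hand inside the node to separate an image-pull problem from an addon problem (a sketch, assuming the docker runtime used by this profile):

    out/minikube-linux-amd64 -p addons-657043 ssh -- docker pull gcr.io/k8s-minikube/busybox:1.28.4-glibc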
==> storage-provisioner [33c500f1c400] <==
I0914 16:45:14.532725 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0914 16:45:14.625226 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0914 16:45:14.625280 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0914 16:45:14.828929 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0914 16:45:14.829143 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-657043_0cf1999e-adc0-409e-8fe9-a7e32ed467bd!
I0914 16:45:14.830399 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"0749bbe9-55ec-4a48-95eb-af8bb83b7018", APIVersion:"v1", ResourceVersion:"598", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-657043_0cf1999e-adc0-409e-8fe9-a7e32ed467bd became leader
I0914 16:45:14.929640 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-657043_0cf1999e-adc0-409e-8fe9-a7e32ed467bd!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-657043 -n addons-657043
helpers_test.go:261: (dbg) Run: kubectl --context addons-657043 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-657043 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-657043 describe pod busybox:
-- stdout --
Name: busybox
Namespace: default
Priority: 0
Service Account: default
Node: addons-657043/192.168.49.2
Start Time: Sat, 14 Sep 2024 16:48:37 +0000
Labels: integration-test=busybox
Annotations: <none>
Status: Pending
IP: 10.244.0.28
IPs:
IP: 10.244.0.28
Containers:
busybox:
Container ID:
Image: gcr.io/k8s-minikube/busybox:1.28.4-glibc
Image ID:
Port: <none>
Host Port: <none>
Command:
sleep
3600
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment:
GOOGLE_APPLICATION_CREDENTIALS: /google-app-creds.json
PROJECT_ID: this_is_fake
GCP_PROJECT: this_is_fake
GCLOUD_PROJECT: this_is_fake
GOOGLE_CLOUD_PROJECT: this_is_fake
CLOUDSDK_CORE_PROJECT: this_is_fake
Mounts:
/google-app-creds.json from gcp-creds (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-q7dkl (ro)
Conditions:
Type Status
PodReadyToStartContainers True
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kube-api-access-q7dkl:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
gcp-creds:
Type: HostPath (bare host directory volume)
Path: /var/lib/minikube/google_application_credentials.json
HostPathType: File
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type     Reason     Age                     From               Message
----     ------     ----                    ----               -------
Normal   Scheduled  9m16s                   default-scheduler  Successfully assigned default/busybox to addons-657043
Normal   Pulling    7m44s (x4 over 9m16s)   kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
Warning  Failed     7m44s (x4 over 9m16s)   kubelet            Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
Warning  Failed     7m44s (x4 over 9m16s)   kubelet            Error: ErrImagePull
Warning  Failed     7m29s (x6 over 9m15s)   kubelet            Error: ImagePullBackOff
Normal   BackOff    4m14s (x19 over 9m15s)  kubelet            Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
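Editor's note: the describe output narrows the root cause. The busybox pod (and, per the kubelet log, registry-test) never started because the manifest request to gcr.io was answered with "unauthorized: authentication failed". gcr.io/k8s-minikube/busybox is normally pullable anonymously, and this pod carries the fake GCP credentials injected for the test (PROJECT_ID=this_is_fake, the gcp-creds mount at /google-app-creds.json), so a plausible, unconfirmed explanation is that a pull secret or credential helper derived from those fake credentials is being presented to gcr.io. Two hedged checks (the exact secret name, if any, depends on the gcp-auth addon):

    kubectl --context addons-657043 get serviceaccount default -o jsonpath='{.imagePullSecrets}'
    kubectl --context addons-657043 get events -n default --field-selector involvedObject.name=busybox

If docker pull gcr.io/k8s-minikube/busybox:1.28.4-glibc succeeds on the host, the failure is confined to the cluster-side credentials rather than the registry or the network.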
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (73.62s)
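Editor's note: to iterate on this one failure without rerunning the whole suite, the subtest can be selected with go test's standard -run filter. This is a sketch: -run, -v, and -timeout are standard go test flags, but the package path and any harness-specific flags (for example, where to find the built minikube binary) are assumptions to verify against the repository's integration-test documentation.

    go test ./test/integration -run 'TestAddons/parallel/Registry' -v -timeout 30m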