=== RUN TestAddons/parallel/Registry
=== PAUSE TestAddons/parallel/Registry
=== CONT TestAddons/parallel/Registry
addons_test.go:332: registry stabilized in 2.929058ms
addons_test.go:334: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-66c9cd494c-kh4r9" [b2944da3-d9b7-4de7-8a57-f934ec8b2970] Running
addons_test.go:334: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.002665734s
addons_test.go:337: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ...
helpers_test.go:344: "registry-proxy-cnkhj" [e15cccd8-7fcb-48c9-9dc2-e79744e87759] Running
addons_test.go:337: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.003371541s
addons_test.go:342: (dbg) Run: kubectl --context addons-457129 delete po -l run=registry-test --now
addons_test.go:347: (dbg) Run: kubectl --context addons-457129 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
addons_test.go:347: (dbg) Non-zero exit: kubectl --context addons-457129 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": exit status 1 (1m0.075422024s)
-- stdout --
pod "registry-test" deleted
-- /stdout --
** stderr **
error: timed out waiting for the condition
** /stderr **
addons_test.go:349: failed to hit registry.kube-system.svc.cluster.local. args "kubectl --context addons-457129 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c \"wget --spider -S http://registry.kube-system.svc.cluster.local\"" failed: exit status 1
addons_test.go:353: expected curl response be "HTTP/1.1 200", but got *pod "registry-test" deleted
*
addons_test.go:361: (dbg) Run: out/minikube-linux-amd64 -p addons-457129 ip
2024/09/18 19:51:46 [DEBUG] GET http://192.168.49.2:5000
addons_test.go:390: (dbg) Run: out/minikube-linux-amd64 -p addons-457129 addons disable registry --alsologtostderr -v=1
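The failed assertion above is the in-cluster connectivity probe against the registry ClusterIP service. As a manual reproduction sketch (assuming the addons-457129 profile from this run is still up; the image, label selector, and service URL below are taken verbatim from the test output above):

    kubectl --context addons-457129 -n kube-system get pods -l actual-registry=true
    kubectl --context addons-457129 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local"
    # If the in-cluster request times out again, probe the registry through the node IP, as the test does next:
    curl -v "http://$(out/minikube-linux-amd64 -p addons-457129 ip):5000"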
helpers_test.go:222: -----------------------post-mortem--------------------------------
helpers_test.go:230: ======> post-mortem[TestAddons/parallel/Registry]: docker inspect <======
helpers_test.go:231: (dbg) Run: docker inspect addons-457129
helpers_test.go:235: (dbg) docker inspect addons-457129:
-- stdout --
[
{
"Id": "86ae18bdbb176a6a332ae7bb58a59c1a5a378dc69477093db6097b18df58d0ec",
"Created": "2024-09-18T19:38:45.341630422Z",
"Path": "/usr/local/bin/entrypoint",
"Args": [
"/sbin/init"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 16419,
"ExitCode": 0,
"Error": "",
"StartedAt": "2024-09-18T19:38:45.475652246Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:bb3bcbaabeeeadbf6b43ae7d1d07e504b3c8a94ec024df89bcb237eba4f5e9b3",
"ResolvConfPath": "/var/lib/docker/containers/86ae18bdbb176a6a332ae7bb58a59c1a5a378dc69477093db6097b18df58d0ec/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/86ae18bdbb176a6a332ae7bb58a59c1a5a378dc69477093db6097b18df58d0ec/hostname",
"HostsPath": "/var/lib/docker/containers/86ae18bdbb176a6a332ae7bb58a59c1a5a378dc69477093db6097b18df58d0ec/hosts",
"LogPath": "/var/lib/docker/containers/86ae18bdbb176a6a332ae7bb58a59c1a5a378dc69477093db6097b18df58d0ec/86ae18bdbb176a6a332ae7bb58a59c1a5a378dc69477093db6097b18df58d0ec-json.log",
"Name": "/addons-457129",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "unconfined",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/lib/modules:/lib/modules:ro",
"addons-457129:/var"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {
"max-size": "100m"
}
},
"NetworkMode": "addons-457129",
"PortBindings": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": ""
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"ConsoleSize": [
0,
0
],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": true,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [
"seccomp=unconfined",
"apparmor=unconfined",
"label=disable"
],
"Tmpfs": {
"/run": "",
"/tmp": ""
},
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"Isolation": "",
"CpuShares": 0,
"Memory": 4194304000,
"NanoCpus": 2000000000,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": [],
"BlkioDeviceWriteBps": [],
"BlkioDeviceReadIOps": [],
"BlkioDeviceWriteIOps": [],
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DeviceRequests": null,
"MemoryReservation": 0,
"MemorySwap": 8388608000,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": null,
"ReadonlyPaths": null
},
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/698e70b085bb54b196b1675e61d9cbaa57c45b260cc495c07302a96e38181855-init/diff:/var/lib/docker/overlay2/ea15ded7869e90879b7418dd3aef0d624c58276fe0ab3abf241b4159795e4858/diff",
"MergedDir": "/var/lib/docker/overlay2/698e70b085bb54b196b1675e61d9cbaa57c45b260cc495c07302a96e38181855/merged",
"UpperDir": "/var/lib/docker/overlay2/698e70b085bb54b196b1675e61d9cbaa57c45b260cc495c07302a96e38181855/diff",
"WorkDir": "/var/lib/docker/overlay2/698e70b085bb54b196b1675e61d9cbaa57c45b260cc495c07302a96e38181855/work"
},
"Name": "overlay2"
},
"Mounts": [
{
"Type": "bind",
"Source": "/lib/modules",
"Destination": "/lib/modules",
"Mode": "ro",
"RW": false,
"Propagation": "rprivate"
},
{
"Type": "volume",
"Name": "addons-457129",
"Source": "/var/lib/docker/volumes/addons-457129/_data",
"Destination": "/var",
"Driver": "local",
"Mode": "z",
"RW": true,
"Propagation": ""
}
],
"Config": {
"Hostname": "addons-457129",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"22/tcp": {},
"2376/tcp": {},
"32443/tcp": {},
"5000/tcp": {},
"8443/tcp": {}
},
"Tty": true,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"container=docker",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": [
"/usr/local/bin/entrypoint",
"/sbin/init"
],
"OnBuild": null,
"Labels": {
"created_by.minikube.sigs.k8s.io": "true",
"mode.minikube.sigs.k8s.io": "addons-457129",
"name.minikube.sigs.k8s.io": "addons-457129",
"role.minikube.sigs.k8s.io": ""
},
"StopSignal": "SIGRTMIN+3"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "2edb383f4d1008747778d15f5662b7df94ab0f97eaff60f0cb90cc7a7830b0dd",
"SandboxKey": "/var/run/docker/netns/2edb383f4d10",
"Ports": {
"22/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32768"
}
],
"2376/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32769"
}
],
"32443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32772"
}
],
"5000/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32770"
}
],
"8443/tcp": [
{
"HostIp": "127.0.0.1",
"HostPort": "32771"
}
]
},
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"addons-457129": {
"IPAMConfig": {
"IPv4Address": "192.168.49.2"
},
"Links": null,
"Aliases": null,
"MacAddress": "02:42:c0:a8:31:02",
"DriverOpts": null,
"NetworkID": "9b2a7ee7c6b1ab882558f3e266a434c02ddbcdde6cce62dbbfb8ff76dd9746cd",
"EndpointID": "6485c6ab5197b94994de4a3c86171f209a04dc44211bbf1ade89f99b444d64e6",
"Gateway": "192.168.49.1",
"IPAddress": "192.168.49.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"DNSNames": [
"addons-457129",
"86ae18bdbb17"
]
}
}
}
}
]
-- /stdout --
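For reference, the docker inspect output above shows the registry's container port 5000/tcp published on the host loopback at 127.0.0.1:32770 for this run (the host port is assigned dynamically, so it will differ between runs), which gives a second, host-side way to probe the addon registry:

    docker port addons-457129 5000
    curl -sI http://127.0.0.1:32770/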
helpers_test.go:239: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p addons-457129 -n addons-457129
helpers_test.go:244: <<< TestAddons/parallel/Registry FAILED: start of post-mortem logs <<<
helpers_test.go:245: ======> post-mortem[TestAddons/parallel/Registry]: minikube logs <======
helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 -p addons-457129 logs -n 25
helpers_test.go:252: TestAddons/parallel/Registry logs:
-- stdout --
==> Audit <==
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
| delete | -p download-docker-630296 | download-docker-630296 | jenkins | v1.34.0 | 18 Sep 24 19:38 UTC | 18 Sep 24 19:38 UTC |
| start | --download-only -p | binary-mirror-336155 | jenkins | v1.34.0 | 18 Sep 24 19:38 UTC | |
| | binary-mirror-336155 | | | | | |
| | --alsologtostderr | | | | | |
| | --binary-mirror | | | | | |
| | http://127.0.0.1:40427 | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| delete | -p binary-mirror-336155 | binary-mirror-336155 | jenkins | v1.34.0 | 18 Sep 24 19:38 UTC | 18 Sep 24 19:38 UTC |
| addons | enable dashboard -p | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:38 UTC | |
| | addons-457129 | | | | | |
| addons | disable dashboard -p | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:38 UTC | |
| | addons-457129 | | | | | |
| start | -p addons-457129 --wait=true | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:38 UTC | 18 Sep 24 19:41 UTC |
| | --memory=4000 --alsologtostderr | | | | | |
| | --addons=registry | | | | | |
| | --addons=metrics-server | | | | | |
| | --addons=volumesnapshots | | | | | |
| | --addons=csi-hostpath-driver | | | | | |
| | --addons=gcp-auth | | | | | |
| | --addons=cloud-spanner | | | | | |
| | --addons=inspektor-gadget | | | | | |
| | --addons=storage-provisioner-rancher | | | | | |
| | --addons=nvidia-device-plugin | | | | | |
| | --addons=yakd --addons=volcano | | | | | |
| | --driver=docker | | | | | |
| | --container-runtime=docker | | | | | |
| | --addons=ingress | | | | | |
| | --addons=ingress-dns | | | | | |
| | --addons=helm-tiller | | | | | |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:42 UTC | 18 Sep 24 19:42 UTC |
| | volcano --alsologtostderr -v=1 | | | | | |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:50 UTC |
| | yakd --alsologtostderr -v=1 | | | | | |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:50 UTC |
| | helm-tiller --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | disable nvidia-device-plugin | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:50 UTC |
| | -p addons-457129 | | | | | |
| ssh | addons-457129 ssh cat | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:50 UTC |
| | /opt/local-path-provisioner/pvc-6c5c3a13-dc76-4ea5-ae23-b00403f48891_default_test-pvc/file1 | | | | | |
| addons | disable cloud-spanner -p | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:50 UTC |
| | addons-457129 | | | | | |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:51 UTC |
| | storage-provisioner-rancher | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | enable headlamp | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:50 UTC | 18 Sep 24 19:50 UTC |
| | -p addons-457129 | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | headlamp --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-457129 addons | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | disable csi-hostpath-driver | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-457129 addons | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | disable metrics-server | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | addons-457129 addons | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | disable volumesnapshots | | | | | |
| | --alsologtostderr -v=1 | | | | | |
| addons | disable inspektor-gadget -p | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | addons-457129 | | | | | |
| ssh | addons-457129 ssh curl -s | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | http://127.0.0.1/ -H 'Host: | | | | | |
| | nginx.example.com' | | | | | |
| ip | addons-457129 ip | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | ingress-dns --alsologtostderr | | | | | |
| | -v=1 | | | | | |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | ingress --alsologtostderr -v=1 | | | | | |
| ip | addons-457129 ip | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| addons | addons-457129 addons disable | addons-457129 | jenkins | v1.34.0 | 18 Sep 24 19:51 UTC | 18 Sep 24 19:51 UTC |
| | registry --alsologtostderr | | | | | |
| | -v=1 | | | | | |
|---------|---------------------------------------------------------------------------------------------|------------------------|---------|---------|---------------------|---------------------|
==> Last Start <==
Log file created at: 2024/09/18 19:38:23
Running on machine: ubuntu-20-agent
Binary: Built with gc go1.23.0 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0918 19:38:23.569466 15685 out.go:345] Setting OutFile to fd 1 ...
I0918 19:38:23.569735 15685 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0918 19:38:23.569745 15685 out.go:358] Setting ErrFile to fd 2...
I0918 19:38:23.569749 15685 out.go:392] TERM=,COLORTERM=, which probably does not support color
I0918 19:38:23.569914 15685 root.go:338] Updating PATH: /home/jenkins/minikube-integration/19667-7499/.minikube/bin
I0918 19:38:23.570482 15685 out.go:352] Setting JSON to false
I0918 19:38:23.571265 15685 start.go:129] hostinfo: {"hostname":"ubuntu-20-agent","uptime":1250,"bootTime":1726687054,"procs":172,"os":"linux","platform":"ubuntu","platformFamily":"debian","platformVersion":"20.04","kernelVersion":"5.15.0-1069-gcp","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"591c9f12-2938-3743-e2bf-c56a050d43d1"}
I0918 19:38:23.571352 15685 start.go:139] virtualization: kvm guest
I0918 19:38:23.573522 15685 out.go:177] * [addons-457129] minikube v1.34.0 on Ubuntu 20.04 (kvm/amd64)
I0918 19:38:23.574820 15685 out.go:177] - MINIKUBE_LOCATION=19667
I0918 19:38:23.574831 15685 notify.go:220] Checking for updates...
I0918 19:38:23.576372 15685 out.go:177] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true
I0918 19:38:23.577712 15685 out.go:177] - KUBECONFIG=/home/jenkins/minikube-integration/19667-7499/kubeconfig
I0918 19:38:23.578842 15685 out.go:177] - MINIKUBE_HOME=/home/jenkins/minikube-integration/19667-7499/.minikube
I0918 19:38:23.580001 15685 out.go:177] - MINIKUBE_BIN=out/minikube-linux-amd64
I0918 19:38:23.581091 15685 out.go:177] - MINIKUBE_FORCE_SYSTEMD=
I0918 19:38:23.582399 15685 driver.go:394] Setting default libvirt URI to qemu:///system
I0918 19:38:23.604347 15685 docker.go:123] docker version: linux-27.2.1:Docker Engine - Community
I0918 19:38:23.604450 15685 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0918 19:38:23.649500 15685 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-18 19:38:23.64080336 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1069-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647943680 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0918 19:38:23.649606 15685 docker.go:318] overlay module found
I0918 19:38:23.651500 15685 out.go:177] * Using the docker driver based on user configuration
I0918 19:38:23.652684 15685 start.go:297] selected driver: docker
I0918 19:38:23.652698 15685 start.go:901] validating driver "docker" against <nil>
I0918 19:38:23.652707 15685 start.go:912] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error:<nil> Reason: Fix: Doc: Version:}
I0918 19:38:23.653470 15685 cli_runner.go:164] Run: docker system info --format "{{json .}}"
I0918 19:38:23.699134 15685 info.go:266] docker info: {ID:TS6T:UINC:MIYS:RZPA:KS6T:4JQK:7JHN:D6RA:LDP2:MHAE:G32M:C5NQ Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:1 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus:<nil> Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization:<nil> Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:30 OomKillDisable:true NGoroutines:45 SystemTime:2024-09-18 19:38:23.690640827 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:5.15.0-1069-gcp OperatingSystem:Ubuntu 20.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:8 MemTotal:33647943680 GenericResources:<nil> DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:ubuntu-20-agent Labels:[] ExperimentalBuild:false ServerVersion:27.2.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:<nil>} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c Expected:7f7fdf5fed64eb6a7caf99b3e12efcf9d60e311c} RuncCommit:{ID:v1.1.14-0-g2c9f560 Expected:v1.1.14-0-g2c9f560} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=apparmor name=seccomp,profile=builtin] ProductLicense: Warnings:<nil> ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.16.2] map[Name:compose Path:/usr/libexec/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. Version:v2.29.2] map[Name:scan Path:/usr/libexec/docker/cli-plugins/docker-scan SchemaVersion:0.1.0 ShortDescription:Docker Scan Vendor:Docker Inc. Version:v0.23.0]] Warnings:<nil>}}
I0918 19:38:23.699340 15685 start_flags.go:310] no existing cluster config was found, will generate one from the flags
I0918 19:38:23.699582 15685 start_flags.go:947] Waiting for all components: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0918 19:38:23.701373 15685 out.go:177] * Using Docker driver with root privileges
I0918 19:38:23.702907 15685 cni.go:84] Creating CNI manager for ""
I0918 19:38:23.702968 15685 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0918 19:38:23.702979 15685 start_flags.go:319] Found "bridge CNI" CNI - setting NetworkPlugin=cni
I0918 19:38:23.703043 15685 start.go:340] cluster config:
{Name:addons-457129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-457129 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0918 19:38:23.704506 15685 out.go:177] * Starting "addons-457129" primary control-plane node in "addons-457129" cluster
I0918 19:38:23.705945 15685 cache.go:121] Beginning downloading kic base image for docker with docker
I0918 19:38:23.707470 15685 out.go:177] * Pulling base image v0.0.45-1726589491-19662 ...
I0918 19:38:23.708774 15685 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0918 19:38:23.708815 15685 preload.go:146] Found local preload: /home/jenkins/minikube-integration/19667-7499/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4
I0918 19:38:23.708823 15685 cache.go:56] Caching tarball of preloaded images
I0918 19:38:23.708864 15685 image.go:79] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 in local docker daemon
I0918 19:38:23.708914 15685 preload.go:172] Found /home/jenkins/minikube-integration/19667-7499/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download
I0918 19:38:23.708925 15685 cache.go:59] Finished verifying existence of preloaded tar for v1.31.1 on docker
I0918 19:38:23.709249 15685 profile.go:143] Saving config to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/config.json ...
I0918 19:38:23.709276 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/config.json: {Name:mka729d6965602732b51ee9a521ac58b736578e8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:23.725542 15685 cache.go:149] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 to local cache
I0918 19:38:23.725680 15685 image.go:63] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 in local cache directory
I0918 19:38:23.725702 15685 image.go:66] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 in local cache directory, skipping pull
I0918 19:38:23.725708 15685 image.go:135] gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 exists in cache, skipping pull
I0918 19:38:23.725718 15685 cache.go:152] successfully saved gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 as a tarball
I0918 19:38:23.725729 15685 cache.go:162] Loading gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 from local cache
I0918 19:38:35.829046 15685 cache.go:164] successfully loaded and using gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 from cached tarball
I0918 19:38:35.829080 15685 cache.go:194] Successfully downloaded all kic artifacts
I0918 19:38:35.829135 15685 start.go:360] acquireMachinesLock for addons-457129: {Name:mke4c12172cee324e5328d55e67a0eafaa50413d Clock:{} Delay:500ms Timeout:10m0s Cancel:<nil>}
I0918 19:38:35.829234 15685 start.go:364] duration metric: took 74.658µs to acquireMachinesLock for "addons-457129"
I0918 19:38:35.829261 15685 start.go:93] Provisioning new machine with config: &{Name:addons-457129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-457129 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0918 19:38:35.829369 15685 start.go:125] createHost starting for "" (driver="docker")
I0918 19:38:35.831280 15685 out.go:235] * Creating docker container (CPUs=2, Memory=4000MB) ...
I0918 19:38:35.831492 15685 start.go:159] libmachine.API.Create for "addons-457129" (driver="docker")
I0918 19:38:35.831520 15685 client.go:168] LocalClient.Create starting
I0918 19:38:35.831598 15685 main.go:141] libmachine: Creating CA: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca.pem
I0918 19:38:36.101727 15685 main.go:141] libmachine: Creating client certificate: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/cert.pem
I0918 19:38:36.382495 15685 cli_runner.go:164] Run: docker network inspect addons-457129 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
W0918 19:38:36.397710 15685 cli_runner.go:211] docker network inspect addons-457129 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1
I0918 19:38:36.397773 15685 network_create.go:284] running [docker network inspect addons-457129] to gather additional debugging logs...
I0918 19:38:36.397790 15685 cli_runner.go:164] Run: docker network inspect addons-457129
W0918 19:38:36.412413 15685 cli_runner.go:211] docker network inspect addons-457129 returned with exit code 1
I0918 19:38:36.412444 15685 network_create.go:287] error running [docker network inspect addons-457129]: docker network inspect addons-457129: exit status 1
stdout:
[]
stderr:
Error response from daemon: network addons-457129 not found
I0918 19:38:36.412457 15685 network_create.go:289] output of [docker network inspect addons-457129]: -- stdout --
[]
-- /stdout --
** stderr **
Error response from daemon: network addons-457129 not found
** /stderr **
I0918 19:38:36.412542 15685 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0918 19:38:36.427766 15685 network.go:206] using free private subnet 192.168.49.0/24: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001a74720}
I0918 19:38:36.427805 15685 network_create.go:124] attempt to create docker network addons-457129 192.168.49.0/24 with gateway 192.168.49.1 and MTU of 1500 ...
I0918 19:38:36.427841 15685 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.49.0/24 --gateway=192.168.49.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=addons-457129 addons-457129
I0918 19:38:36.484531 15685 network_create.go:108] docker network addons-457129 192.168.49.0/24 created
I0918 19:38:36.484557 15685 kic.go:121] calculated static IP "192.168.49.2" for the "addons-457129" container
I0918 19:38:36.484609 15685 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
I0918 19:38:36.498902 15685 cli_runner.go:164] Run: docker volume create addons-457129 --label name.minikube.sigs.k8s.io=addons-457129 --label created_by.minikube.sigs.k8s.io=true
I0918 19:38:36.515877 15685 oci.go:103] Successfully created a docker volume addons-457129
I0918 19:38:36.515949 15685 cli_runner.go:164] Run: docker run --rm --name addons-457129-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-457129 --entrypoint /usr/bin/test -v addons-457129:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 -d /var/lib
I0918 19:38:41.437774 15685 cli_runner.go:217] Completed: docker run --rm --name addons-457129-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-457129 --entrypoint /usr/bin/test -v addons-457129:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 -d /var/lib: (4.921784865s)
I0918 19:38:41.437798 15685 oci.go:107] Successfully prepared a docker volume addons-457129
I0918 19:38:41.437824 15685 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0918 19:38:41.437851 15685 kic.go:194] Starting extracting preloaded images to volume ...
I0918 19:38:41.437921 15685 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19667-7499/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-457129:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 -I lz4 -xf /preloaded.tar -C /extractDir
I0918 19:38:45.282408 15685 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/jenkins/minikube-integration/19667-7499/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.31.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v addons-457129:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 -I lz4 -xf /preloaded.tar -C /extractDir: (3.844436932s)
I0918 19:38:45.282442 15685 kic.go:203] duration metric: took 3.844589302s to extract preloaded images to volume ...
W0918 19:38:45.282565 15685 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
I0918 19:38:45.282682 15685 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'"
I0918 19:38:45.327087 15685 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname addons-457129 --name addons-457129 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=addons-457129 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=addons-457129 --network addons-457129 --ip 192.168.49.2 --volume addons-457129:/var --security-opt apparmor=unconfined --memory=4000mb --cpus=2 -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4
I0918 19:38:45.645581 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Running}}
I0918 19:38:45.662786 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:38:45.681025 15685 cli_runner.go:164] Run: docker exec addons-457129 stat /var/lib/dpkg/alternatives/iptables
I0918 19:38:45.726959 15685 oci.go:144] the created container "addons-457129" has a running status.
I0918 19:38:45.726987 15685 kic.go:225] Creating ssh key for kic: /home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa...
I0918 19:38:46.002939 15685 kic_runner.go:191] docker (temp): /home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes)
I0918 19:38:46.034517 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:38:46.053857 15685 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys
I0918 19:38:46.053877 15685 kic_runner.go:114] Args: [docker exec --privileged addons-457129 chown docker:docker /home/docker/.ssh/authorized_keys]
I0918 19:38:46.120401 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:38:46.139859 15685 machine.go:93] provisionDockerMachine start ...
I0918 19:38:46.139967 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:46.155870 15685 main.go:141] libmachine: Using SSH client type: native
I0918 19:38:46.156127 15685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0918 19:38:46.156149 15685 main.go:141] libmachine: About to run SSH command:
hostname
I0918 19:38:46.296061 15685 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-457129
I0918 19:38:46.296091 15685 ubuntu.go:169] provisioning hostname "addons-457129"
I0918 19:38:46.296160 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:46.313542 15685 main.go:141] libmachine: Using SSH client type: native
I0918 19:38:46.313711 15685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0918 19:38:46.313725 15685 main.go:141] libmachine: About to run SSH command:
sudo hostname addons-457129 && echo "addons-457129" | sudo tee /etc/hostname
I0918 19:38:46.458628 15685 main.go:141] libmachine: SSH cmd err, output: <nil>: addons-457129
I0918 19:38:46.458725 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:46.475198 15685 main.go:141] libmachine: Using SSH client type: native
I0918 19:38:46.475363 15685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0918 19:38:46.475398 15685 main.go:141] libmachine: About to run SSH command:
if ! grep -xq '.*\saddons-457129' /etc/hosts; then
if grep -xq '127.0.1.1\s.*' /etc/hosts; then
sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 addons-457129/g' /etc/hosts;
else
echo '127.0.1.1 addons-457129' | sudo tee -a /etc/hosts;
fi
fi
I0918 19:38:46.608734 15685 main.go:141] libmachine: SSH cmd err, output: <nil>:
I0918 19:38:46.608766 15685 ubuntu.go:175] set auth options {CertDir:/home/jenkins/minikube-integration/19667-7499/.minikube CaCertPath:/home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca.pem CaPrivateKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/server.pem ServerKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/server-key.pem ClientKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/jenkins/minikube-integration/19667-7499/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/jenkins/minikube-integration/19667-7499/.minikube}
I0918 19:38:46.608793 15685 ubuntu.go:177] setting up certificates
I0918 19:38:46.608802 15685 provision.go:84] configureAuth start
I0918 19:38:46.608852 15685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-457129
I0918 19:38:46.624741 15685 provision.go:143] copyHostCerts
I0918 19:38:46.624820 15685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/cert.pem --> /home/jenkins/minikube-integration/19667-7499/.minikube/cert.pem (1123 bytes)
I0918 19:38:46.624973 15685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/key.pem --> /home/jenkins/minikube-integration/19667-7499/.minikube/key.pem (1679 bytes)
I0918 19:38:46.625059 15685 exec_runner.go:151] cp: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca.pem --> /home/jenkins/minikube-integration/19667-7499/.minikube/ca.pem (1082 bytes)
I0918 19:38:46.625131 15685 provision.go:117] generating server cert: /home/jenkins/minikube-integration/19667-7499/.minikube/machines/server.pem ca-key=/home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca.pem private-key=/home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca-key.pem org=jenkins.addons-457129 san=[127.0.0.1 192.168.49.2 addons-457129 localhost minikube]
I0918 19:38:46.680800 15685 provision.go:177] copyRemoteCerts
I0918 19:38:46.680860 15685 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker
I0918 19:38:46.680920 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:46.697775 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:38:46.793131 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes)
I0918 19:38:46.814284 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/machines/server.pem --> /etc/docker/server.pem (1208 bytes)
I0918 19:38:46.834547 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes)
I0918 19:38:46.854534 15685 provision.go:87] duration metric: took 245.719267ms to configureAuth
I0918 19:38:46.854566 15685 ubuntu.go:193] setting minikube options for container-runtime
I0918 19:38:46.854778 15685 config.go:182] Loaded profile config "addons-457129": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0918 19:38:46.854835 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:46.870949 15685 main.go:141] libmachine: Using SSH client type: native
I0918 19:38:46.871138 15685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0918 19:38:46.871152 15685 main.go:141] libmachine: About to run SSH command:
df --output=fstype / | tail -n 1
I0918 19:38:47.000938 15685 main.go:141] libmachine: SSH cmd err, output: <nil>: overlay
I0918 19:38:47.000958 15685 ubuntu.go:71] root file system type: overlay
I0918 19:38:47.001070 15685 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ...
I0918 19:38:47.001132 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:47.017517 15685 main.go:141] libmachine: Using SSH client type: native
I0918 19:38:47.017725 15685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0918 19:38:47.017820 15685 main.go:141] libmachine: About to run SSH command:
sudo mkdir -p /lib/systemd/system && printf %s "[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
" | sudo tee /lib/systemd/system/docker.service.new
I0918 19:38:47.158731 15685 main.go:141] libmachine: SSH cmd err, output: <nil>: [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
StartLimitBurst=3
StartLimitIntervalSec=60
[Service]
Type=notify
Restart=on-failure
# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
# The base configuration already specifies an 'ExecStart=...' command. The first directive
# here is to clear out that command inherited from the base configuration. Without this,
# the command from the base configuration and the command specified here are treated as
# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
# will catch this invalid input and refuse to start the service with an error like:
# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
I0918 19:38:47.158806 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:47.175635 15685 main.go:141] libmachine: Using SSH client type: native
I0918 19:38:47.175853 15685 main.go:141] libmachine: &{{{<nil> 0 [] [] []} docker [0x86c560] 0x86f240 <nil> [] 0s} 127.0.0.1 32768 <nil> <nil>}
I0918 19:38:47.175877 15685 main.go:141] libmachine: About to run SSH command:
sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; }
I0918 19:38:47.838436 15685 main.go:141] libmachine: SSH cmd err, output: <nil>: --- /lib/systemd/system/docker.service 2024-09-06 12:06:41.000000000 +0000
+++ /lib/systemd/system/docker.service.new 2024-09-18 19:38:47.154509418 +0000
@@ -1,46 +1,49 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target docker.socket firewalld.service containerd.service time-set.target
-Wants=network-online.target containerd.service
+BindsTo=containerd.service
+After=network-online.target firewalld.service containerd.service
+Wants=network-online.target
Requires=docker.socket
+StartLimitBurst=3
+StartLimitIntervalSec=60
[Service]
Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutStartSec=0
-RestartSec=2
-Restart=always
+Restart=on-failure
-# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
-# Both the old, and new location are accepted by systemd 229 and up, so using the old location
-# to make them work for either version of systemd.
-StartLimitBurst=3
-# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
-# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
-# this option work for either version of systemd.
-StartLimitInterval=60s
+
+# This file is a systemd drop-in unit that inherits from the base dockerd configuration.
+# The base configuration already specifies an 'ExecStart=...' command. The first directive
+# here is to clear out that command inherited from the base configuration. Without this,
+# the command from the base configuration and the command specified here are treated as
+# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd
+# will catch this invalid input and refuse to start the service with an error like:
+# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services.
+
+# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ExecStart=
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
-# Comment TasksMax if your systemd version does not support it.
-# Only systemd 226 and above support this option.
+# Uncomment TasksMax if your systemd version supports it.
+# Only systemd 226 and above support this version.
TasksMax=infinity
+TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
-OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
I0918 19:38:47.838462 15685 machine.go:96] duration metric: took 1.698573616s to provisionDockerMachine
I0918 19:38:47.838473 15685 client.go:171] duration metric: took 12.006947819s to LocalClient.Create
I0918 19:38:47.838487 15685 start.go:167] duration metric: took 12.006997743s to libmachine.API.Create "addons-457129"
I0918 19:38:47.838493 15685 start.go:293] postStartSetup for "addons-457129" (driver="docker")
I0918 19:38:47.838502 15685 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs]
I0918 19:38:47.838544 15685 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs
I0918 19:38:47.838577 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:47.854338 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:38:47.949297 15685 ssh_runner.go:195] Run: cat /etc/os-release
I0918 19:38:47.952151 15685 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found
I0918 19:38:47.952179 15685 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found
I0918 19:38:47.952187 15685 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found
I0918 19:38:47.952193 15685 info.go:137] Remote host: Ubuntu 22.04.5 LTS
I0918 19:38:47.952203 15685 filesync.go:126] Scanning /home/jenkins/minikube-integration/19667-7499/.minikube/addons for local assets ...
I0918 19:38:47.952263 15685 filesync.go:126] Scanning /home/jenkins/minikube-integration/19667-7499/.minikube/files for local assets ...
I0918 19:38:47.952286 15685 start.go:296] duration metric: took 113.78694ms for postStartSetup
I0918 19:38:47.952658 15685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-457129
I0918 19:38:47.969067 15685 profile.go:143] Saving config to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/config.json ...
I0918 19:38:47.969307 15685 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'"
I0918 19:38:47.969346 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:47.984314 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:38:48.073339 15685 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'"
I0918 19:38:48.077048 15685 start.go:128] duration metric: took 12.247666146s to createHost
I0918 19:38:48.077069 15685 start.go:83] releasing machines lock for "addons-457129", held for 12.247822412s
I0918 19:38:48.077132 15685 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" addons-457129
I0918 19:38:48.092963 15685 ssh_runner.go:195] Run: cat /version.json
I0918 19:38:48.093005 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:48.093055 15685 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/
I0918 19:38:48.093132 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:38:48.108746 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:38:48.109990 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:38:48.273433 15685 ssh_runner.go:195] Run: systemctl --version
I0918 19:38:48.277469 15685 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*"
I0918 19:38:48.281202 15685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ;
I0918 19:38:48.302726 15685 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found
I0918 19:38:48.302785 15685 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ;
I0918 19:38:48.326980 15685 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s)
I0918 19:38:48.327006 15685 start.go:495] detecting cgroup driver to use...
I0918 19:38:48.327034 15685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0918 19:38:48.327122 15685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock
" | sudo tee /etc/crictl.yaml"
I0918 19:38:48.340854 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml"
I0918 19:38:48.349601 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml"
I0918 19:38:48.358047 15685 containerd.go:146] configuring containerd to use "cgroupfs" as cgroup driver...
I0918 19:38:48.358095 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = false|g' /etc/containerd/config.toml"
I0918 19:38:48.366528 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0918 19:38:48.375027 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml"
I0918 19:38:48.383213 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml"
I0918 19:38:48.391480 15685 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk"
I0918 19:38:48.399331 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml"
I0918 19:38:48.407762 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml"
I0918 19:38:48.416079 15685 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml"
I0918 19:38:48.424649 15685 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables
I0918 19:38:48.431627 15685 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
I0918 19:38:48.438598 15685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0918 19:38:48.510259 15685 ssh_runner.go:195] Run: sudo systemctl restart containerd
I0918 19:38:48.591524 15685 start.go:495] detecting cgroup driver to use...
I0918 19:38:48.591573 15685 detect.go:187] detected "cgroupfs" cgroup driver on host os
I0918 19:38:48.591618 15685 ssh_runner.go:195] Run: sudo systemctl cat docker.service
I0918 19:38:48.603403 15685 cruntime.go:279] skipping containerd shutdown because we are bound to it
I0918 19:38:48.603467 15685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio
I0918 19:38:48.615719 15685 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock
" | sudo tee /etc/crictl.yaml"
I0918 19:38:48.630388 15685 ssh_runner.go:195] Run: which cri-dockerd
I0918 19:38:48.633830 15685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d
I0918 19:38:48.641889 15685 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (190 bytes)
I0918 19:38:48.660561 15685 ssh_runner.go:195] Run: sudo systemctl unmask docker.service
I0918 19:38:48.744514 15685 ssh_runner.go:195] Run: sudo systemctl enable docker.socket
I0918 19:38:48.842218 15685 docker.go:574] configuring docker to use "cgroupfs" as cgroup driver...
I0918 19:38:48.842357 15685 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (130 bytes)
I0918 19:38:48.858718 15685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0918 19:38:48.938531 15685 ssh_runner.go:195] Run: sudo systemctl restart docker
I0918 19:38:49.186983 15685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket
I0918 19:38:49.197319 15685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0918 19:38:49.207395 15685 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket
I0918 19:38:49.286590 15685 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket
I0918 19:38:49.362493 15685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0918 19:38:49.438398 15685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket
I0918 19:38:49.450034 15685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service
I0918 19:38:49.459397 15685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0918 19:38:49.531120 15685 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service
I0918 19:38:49.590479 15685 start.go:542] Will wait 60s for socket path /var/run/cri-dockerd.sock
I0918 19:38:49.590567 15685 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock
I0918 19:38:49.594022 15685 start.go:563] Will wait 60s for crictl version
I0918 19:38:49.594072 15685 ssh_runner.go:195] Run: which crictl
I0918 19:38:49.597027 15685 ssh_runner.go:195] Run: sudo /usr/bin/crictl version
I0918 19:38:49.627421 15685 start.go:579] Version: 0.1.0
RuntimeName: docker
RuntimeVersion: 27.2.1
RuntimeApiVersion: v1
I0918 19:38:49.627481 15685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0918 19:38:49.650685 15685 ssh_runner.go:195] Run: docker version --format {{.Server.Version}}
I0918 19:38:49.675764 15685 out.go:235] * Preparing Kubernetes v1.31.1 on Docker 27.2.1 ...
I0918 19:38:49.675843 15685 cli_runner.go:164] Run: docker network inspect addons-457129 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
I0918 19:38:49.691945 15685 ssh_runner.go:195] Run: grep 192.168.49.1 host.minikube.internal$ /etc/hosts
I0918 19:38:49.695394 15685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.49.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0918 19:38:49.705021 15685 kubeadm.go:883] updating cluster {Name:addons-457129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-457129 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNa
mes:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuF
irmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ...
I0918 19:38:49.705122 15685 preload.go:131] Checking if preload exists for k8s version v1.31.1 and runtime docker
I0918 19:38:49.705163 15685 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0918 19:38:49.722133 15685 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0918 19:38:49.722152 15685 docker.go:615] Images already preloaded, skipping extraction
I0918 19:38:49.722214 15685 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}}
I0918 19:38:49.740042 15685 docker.go:685] Got preloaded images: -- stdout --
registry.k8s.io/kube-apiserver:v1.31.1
registry.k8s.io/kube-scheduler:v1.31.1
registry.k8s.io/kube-controller-manager:v1.31.1
registry.k8s.io/kube-proxy:v1.31.1
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/etcd:3.5.15-0
registry.k8s.io/pause:3.10
gcr.io/k8s-minikube/storage-provisioner:v5
-- /stdout --
I0918 19:38:49.740070 15685 cache_images.go:84] Images are preloaded, skipping loading
I0918 19:38:49.740084 15685 kubeadm.go:934] updating node { 192.168.49.2 8443 v1.31.1 docker true true} ...
I0918 19:38:49.740171 15685 kubeadm.go:946] kubelet [Unit]
Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.31.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=addons-457129 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.49.2
[Install]
config:
{KubernetesVersion:v1.31.1 ClusterName:addons-457129 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:}
I0918 19:38:49.740219 15685 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}}
I0918 19:38:49.782129 15685 cni.go:84] Creating CNI manager for ""
I0918 19:38:49.782172 15685 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0918 19:38:49.782187 15685 kubeadm.go:84] Using pod CIDR: 10.244.0.0/16
I0918 19:38:49.782212 15685 kubeadm.go:181] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.49.2 APIServerPort:8443 KubernetesVersion:v1.31.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:addons-457129 NodeName:addons-457129 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.49.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.49.2 CgroupDriver:cgroupfs ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kuber
netes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true}
I0918 19:38:49.782367 15685 kubeadm.go:187] kubeadm config:
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.49.2
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
name: "addons-457129"
kubeletExtraArgs:
node-ip: 192.168.49.2
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "192.168.49.2"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.31.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: cgroupfs
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
hairpinMode: hairpin-veth
runtimeRequestTimeout: 15m
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
conntrack:
maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
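The generated config above uses the kubeadm.k8s.io/v1beta3 API, which newer kubeadm releases flag as deprecated (see the kubeadm warnings later in this log, which recommend 'kubeadm config migrate'). A minimal sketch of migrating such a file on the node, assuming the /var/tmp/minikube/kubeadm.yaml path used elsewhere in this log; the output path is a placeholder:
# rewrite the deprecated v1beta3 config to the current kubeadm API version
kubeadm config migrate --old-config /var/tmp/minikube/kubeadm.yaml --new-config /tmp/kubeadm-migrated.yaml
# inspect the migrated config
cat /tmp/kubeadm-migrated.yaml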
I0918 19:38:49.782433 15685 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.31.1
I0918 19:38:49.790449 15685 binaries.go:44] Found k8s binaries, skipping transfer
I0918 19:38:49.790510 15685 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube
I0918 19:38:49.798113 15685 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (312 bytes)
I0918 19:38:49.813765 15685 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes)
I0918 19:38:49.828935 15685 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2155 bytes)
I0918 19:38:49.844051 15685 ssh_runner.go:195] Run: grep 192.168.49.2 control-plane.minikube.internal$ /etc/hosts
I0918 19:38:49.846953 15685 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.49.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts""
I0918 19:38:49.856075 15685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0918 19:38:49.928293 15685 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0918 19:38:49.940485 15685 certs.go:68] Setting up /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129 for IP: 192.168.49.2
I0918 19:38:49.940512 15685 certs.go:194] generating shared ca certs ...
I0918 19:38:49.940530 15685 certs.go:226] acquiring lock for ca certs: {Name:mke16e4aeb0a19696e8eeda873787e346a3aedef Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:49.940662 15685 certs.go:240] generating "minikubeCA" ca cert: /home/jenkins/minikube-integration/19667-7499/.minikube/ca.key
I0918 19:38:50.215011 15685 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19667-7499/.minikube/ca.crt ...
I0918 19:38:50.215044 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/ca.crt: {Name:mke7570d54eb2a335e899bf4483c7f0c3ad906b6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.215209 15685 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19667-7499/.minikube/ca.key ...
I0918 19:38:50.215220 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/ca.key: {Name:mk4b313b658836d54e02bdf2cf120987af39599a Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.215290 15685 certs.go:240] generating "proxyClientCA" ca cert: /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.key
I0918 19:38:50.397960 15685 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.crt ...
I0918 19:38:50.397993 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.crt: {Name:mka480476b5617857b0fbf7151893a6910e8e832 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.398186 15685 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.key ...
I0918 19:38:50.398201 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.key: {Name:mk17996b59c93b625ae6ff32125b5099ef8f8e16 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.398292 15685 certs.go:256] generating profile certs ...
I0918 19:38:50.398347 15685 certs.go:363] generating signed profile cert for "minikube-user": /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/client.key
I0918 19:38:50.398370 15685 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/client.crt with IP's: []
I0918 19:38:50.576553 15685 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/client.crt ...
I0918 19:38:50.576582 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/client.crt: {Name:mk85a27d5b4acd7703a7376736958e4cff952462 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.576765 15685 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/client.key ...
I0918 19:38:50.576778 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/client.key: {Name:mk393b0aa6dd1883bc7a2f069bf6fc8062dbec33 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.576876 15685 certs.go:363] generating signed profile cert for "minikube": /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.key.fa6788af
I0918 19:38:50.576917 15685 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.crt.fa6788af with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.49.2]
I0918 19:38:50.707997 15685 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.crt.fa6788af ...
I0918 19:38:50.708027 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.crt.fa6788af: {Name:mkf0afe480165b2f1d4a54167c97cac2b2c240fd Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.708205 15685 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.key.fa6788af ...
I0918 19:38:50.708219 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.key.fa6788af: {Name:mka1ab6cac40a4ef83bdeacc31672241483f72a6 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.708312 15685 certs.go:381] copying /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.crt.fa6788af -> /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.crt
I0918 19:38:50.708387 15685 certs.go:385] copying /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.key.fa6788af -> /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.key
I0918 19:38:50.708431 15685 certs.go:363] generating signed profile cert for "aggregator": /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.key
I0918 19:38:50.708448 15685 crypto.go:68] Generating cert /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.crt with IP's: []
I0918 19:38:50.949639 15685 crypto.go:156] Writing cert to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.crt ...
I0918 19:38:50.949667 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.crt: {Name:mkb4d4d2438a9a9ee90325c2bd1e2af985f610c8 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.949822 15685 crypto.go:164] Writing key to /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.key ...
I0918 19:38:50.949832 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.key: {Name:mk17bca11dccc60e1d889659c97451eeaccd6427 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:38:50.950001 15685 certs.go:484] found cert: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca-key.pem (1675 bytes)
I0918 19:38:50.950034 15685 certs.go:484] found cert: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/ca.pem (1082 bytes)
I0918 19:38:50.950057 15685 certs.go:484] found cert: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/cert.pem (1123 bytes)
I0918 19:38:50.950080 15685 certs.go:484] found cert: /home/jenkins/minikube-integration/19667-7499/.minikube/certs/key.pem (1679 bytes)
I0918 19:38:50.950672 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes)
I0918 19:38:50.972043 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes)
I0918 19:38:50.993090 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes)
I0918 19:38:51.014045 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes)
I0918 19:38:51.036090 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1419 bytes)
I0918 19:38:51.057857 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes)
I0918 19:38:51.079015 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes)
I0918 19:38:51.101088 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/profiles/addons-457129/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes)
I0918 19:38:51.122402 15685 ssh_runner.go:362] scp /home/jenkins/minikube-integration/19667-7499/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes)
I0918 19:38:51.143906 15685 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes)
I0918 19:38:51.159528 15685 ssh_runner.go:195] Run: openssl version
I0918 19:38:51.164365 15685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem"
I0918 19:38:51.172458 15685 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem
I0918 19:38:51.175317 15685 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Sep 18 19:38 /usr/share/ca-certificates/minikubeCA.pem
I0918 19:38:51.175361 15685 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem
I0918 19:38:51.181376 15685 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0"
I0918 19:38:51.189826 15685 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt
I0918 19:38:51.192755 15685 certs.go:399] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1
stdout:
stderr:
stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory
I0918 19:38:51.192799 15685 kubeadm.go:392] StartCluster: {Name:addons-457129 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.45-1726589491-19662@sha256:6370b9fec173944088c2d87d44b01819c0ec611a83d9e2f38d36352dff8121a4 Memory:4000 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.31.1 ClusterName:addons-457129 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames
:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop:<nil> ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/jenkins:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirm
warePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s}
I0918 19:38:51.192909 15685 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}}
I0918 19:38:51.209644 15685 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd
I0918 19:38:51.217575 15685 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml
I0918 19:38:51.225734 15685 kubeadm.go:214] ignoring SystemVerification for kubeadm because of docker driver
I0918 19:38:51.225784 15685 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf
I0918 19:38:51.233303 15685 kubeadm.go:155] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory
ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory
ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory
ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory
I0918 19:38:51.233323 15685 kubeadm.go:157] found existing configuration files:
I0918 19:38:51.233358 15685 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf
I0918 19:38:51.240945 15685 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/admin.conf: No such file or directory
I0918 19:38:51.241008 15685 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf
I0918 19:38:51.247943 15685 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf
I0918 19:38:51.255143 15685 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/kubelet.conf: No such file or directory
I0918 19:38:51.255188 15685 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf
I0918 19:38:51.262452 15685 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf
I0918 19:38:51.270149 15685 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/controller-manager.conf: No such file or directory
I0918 19:38:51.270201 15685 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf
I0918 19:38:51.277906 15685 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf
I0918 19:38:51.285416 15685 kubeadm.go:163] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2
stdout:
stderr:
grep: /etc/kubernetes/scheduler.conf: No such file or directory
I0918 19:38:51.285482 15685 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf
I0918 19:38:51.292664 15685 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.31.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
I0918 19:38:51.324592 15685 kubeadm.go:310] W0918 19:38:51.323861 1925 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0918 19:38:51.325066 15685 kubeadm.go:310] W0918 19:38:51.324532 1925 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
I0918 19:38:51.346983 15685 kubeadm.go:310] [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.15.0-1069-gcp\n", err: exit status 1
I0918 19:38:51.396333 15685 kubeadm.go:310] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
I0918 19:39:00.061702 15685 kubeadm.go:310] [init] Using Kubernetes version: v1.31.1
I0918 19:39:00.061774 15685 kubeadm.go:310] [preflight] Running pre-flight checks
I0918 19:39:00.061883 15685 kubeadm.go:310] [preflight] The system verification failed. Printing the output from the verification:
I0918 19:39:00.061937 15685 kubeadm.go:310] KERNEL_VERSION: 5.15.0-1069-gcp
I0918 19:39:00.061968 15685 kubeadm.go:310] OS: Linux
I0918 19:39:00.062026 15685 kubeadm.go:310] CGROUPS_CPU: enabled
I0918 19:39:00.062111 15685 kubeadm.go:310] CGROUPS_CPUACCT: enabled
I0918 19:39:00.062168 15685 kubeadm.go:310] CGROUPS_CPUSET: enabled
I0918 19:39:00.062222 15685 kubeadm.go:310] CGROUPS_DEVICES: enabled
I0918 19:39:00.062272 15685 kubeadm.go:310] CGROUPS_FREEZER: enabled
I0918 19:39:00.062326 15685 kubeadm.go:310] CGROUPS_MEMORY: enabled
I0918 19:39:00.062393 15685 kubeadm.go:310] CGROUPS_PIDS: enabled
I0918 19:39:00.062475 15685 kubeadm.go:310] CGROUPS_HUGETLB: enabled
I0918 19:39:00.062548 15685 kubeadm.go:310] CGROUPS_BLKIO: enabled
I0918 19:39:00.062643 15685 kubeadm.go:310] [preflight] Pulling images required for setting up a Kubernetes cluster
I0918 19:39:00.062793 15685 kubeadm.go:310] [preflight] This might take a minute or two, depending on the speed of your internet connection
I0918 19:39:00.062905 15685 kubeadm.go:310] [preflight] You can also perform this action beforehand using 'kubeadm config images pull'
I0918 19:39:00.062995 15685 kubeadm.go:310] [certs] Using certificateDir folder "/var/lib/minikube/certs"
I0918 19:39:00.064520 15685 out.go:235] - Generating certificates and keys ...
I0918 19:39:00.064606 15685 kubeadm.go:310] [certs] Using existing ca certificate authority
I0918 19:39:00.064683 15685 kubeadm.go:310] [certs] Using existing apiserver certificate and key on disk
I0918 19:39:00.064771 15685 kubeadm.go:310] [certs] Generating "apiserver-kubelet-client" certificate and key
I0918 19:39:00.064851 15685 kubeadm.go:310] [certs] Generating "front-proxy-ca" certificate and key
I0918 19:39:00.064965 15685 kubeadm.go:310] [certs] Generating "front-proxy-client" certificate and key
I0918 19:39:00.065041 15685 kubeadm.go:310] [certs] Generating "etcd/ca" certificate and key
I0918 19:39:00.065120 15685 kubeadm.go:310] [certs] Generating "etcd/server" certificate and key
I0918 19:39:00.065250 15685 kubeadm.go:310] [certs] etcd/server serving cert is signed for DNS names [addons-457129 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0918 19:39:00.065331 15685 kubeadm.go:310] [certs] Generating "etcd/peer" certificate and key
I0918 19:39:00.065497 15685 kubeadm.go:310] [certs] etcd/peer serving cert is signed for DNS names [addons-457129 localhost] and IPs [192.168.49.2 127.0.0.1 ::1]
I0918 19:39:00.065573 15685 kubeadm.go:310] [certs] Generating "etcd/healthcheck-client" certificate and key
I0918 19:39:00.065657 15685 kubeadm.go:310] [certs] Generating "apiserver-etcd-client" certificate and key
I0918 19:39:00.065737 15685 kubeadm.go:310] [certs] Generating "sa" key and public key
I0918 19:39:00.065829 15685 kubeadm.go:310] [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0918 19:39:00.065915 15685 kubeadm.go:310] [kubeconfig] Writing "admin.conf" kubeconfig file
I0918 19:39:00.066005 15685 kubeadm.go:310] [kubeconfig] Writing "super-admin.conf" kubeconfig file
I0918 19:39:00.066092 15685 kubeadm.go:310] [kubeconfig] Writing "kubelet.conf" kubeconfig file
I0918 19:39:00.066191 15685 kubeadm.go:310] [kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0918 19:39:00.066263 15685 kubeadm.go:310] [kubeconfig] Writing "scheduler.conf" kubeconfig file
I0918 19:39:00.066389 15685 kubeadm.go:310] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
I0918 19:39:00.066467 15685 kubeadm.go:310] [control-plane] Using manifest folder "/etc/kubernetes/manifests"
I0918 19:39:00.068128 15685 out.go:235] - Booting up control plane ...
I0918 19:39:00.068203 15685 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-apiserver"
I0918 19:39:00.068295 15685 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-controller-manager"
I0918 19:39:00.068382 15685 kubeadm.go:310] [control-plane] Creating static Pod manifest for "kube-scheduler"
I0918 19:39:00.068472 15685 kubeadm.go:310] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
I0918 19:39:00.068555 15685 kubeadm.go:310] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0918 19:39:00.068599 15685 kubeadm.go:310] [kubelet-start] Starting the kubelet
I0918 19:39:00.068718 15685 kubeadm.go:310] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
I0918 19:39:00.068817 15685 kubeadm.go:310] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
I0918 19:39:00.068871 15685 kubeadm.go:310] [kubelet-check] The kubelet is healthy after 501.056589ms
I0918 19:39:00.068964 15685 kubeadm.go:310] [api-check] Waiting for a healthy API server. This can take up to 4m0s
I0918 19:39:00.069015 15685 kubeadm.go:310] [api-check] The API server is healthy after 4.502057368s
I0918 19:39:00.069105 15685 kubeadm.go:310] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0918 19:39:00.069219 15685 kubeadm.go:310] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
I0918 19:39:00.069269 15685 kubeadm.go:310] [upload-certs] Skipping phase. Please see --upload-certs
I0918 19:39:00.069446 15685 kubeadm.go:310] [mark-control-plane] Marking the node addons-457129 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
I0918 19:39:00.069532 15685 kubeadm.go:310] [bootstrap-token] Using token: hdrfj8.iw5e001tr9gn1zt0
I0918 19:39:00.071170 15685 out.go:235] - Configuring RBAC rules ...
I0918 19:39:00.071297 15685 kubeadm.go:310] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
I0918 19:39:00.071369 15685 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
I0918 19:39:00.071518 15685 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
I0918 19:39:00.071670 15685 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
I0918 19:39:00.071805 15685 kubeadm.go:310] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
I0918 19:39:00.071918 15685 kubeadm.go:310] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
I0918 19:39:00.072067 15685 kubeadm.go:310] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
I0918 19:39:00.072117 15685 kubeadm.go:310] [addons] Applied essential addon: CoreDNS
I0918 19:39:00.072157 15685 kubeadm.go:310] [addons] Applied essential addon: kube-proxy
I0918 19:39:00.072163 15685 kubeadm.go:310]
I0918 19:39:00.072214 15685 kubeadm.go:310] Your Kubernetes control-plane has initialized successfully!
I0918 19:39:00.072221 15685 kubeadm.go:310]
I0918 19:39:00.072283 15685 kubeadm.go:310] To start using your cluster, you need to run the following as a regular user:
I0918 19:39:00.072289 15685 kubeadm.go:310]
I0918 19:39:00.072313 15685 kubeadm.go:310] mkdir -p $HOME/.kube
I0918 19:39:00.072379 15685 kubeadm.go:310] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
I0918 19:39:00.072433 15685 kubeadm.go:310] sudo chown $(id -u):$(id -g) $HOME/.kube/config
I0918 19:39:00.072439 15685 kubeadm.go:310]
I0918 19:39:00.072483 15685 kubeadm.go:310] Alternatively, if you are the root user, you can run:
I0918 19:39:00.072492 15685 kubeadm.go:310]
I0918 19:39:00.072554 15685 kubeadm.go:310] export KUBECONFIG=/etc/kubernetes/admin.conf
I0918 19:39:00.072567 15685 kubeadm.go:310]
I0918 19:39:00.072639 15685 kubeadm.go:310] You should now deploy a pod network to the cluster.
I0918 19:39:00.072747 15685 kubeadm.go:310] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
I0918 19:39:00.072850 15685 kubeadm.go:310] https://kubernetes.io/docs/concepts/cluster-administration/addons/
I0918 19:39:00.072863 15685 kubeadm.go:310]
I0918 19:39:00.072976 15685 kubeadm.go:310] You can now join any number of control-plane nodes by copying certificate authorities
I0918 19:39:00.073058 15685 kubeadm.go:310] and service account keys on each node and then running the following as root:
I0918 19:39:00.073067 15685 kubeadm.go:310]
I0918 19:39:00.073143 15685 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token hdrfj8.iw5e001tr9gn1zt0 \
I0918 19:39:00.073237 15685 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:895a1a4b821247c3b3bc3d8674fdaf9ae4007075fb12e882893c62d1438babd8 \
I0918 19:39:00.073273 15685 kubeadm.go:310] --control-plane
I0918 19:39:00.073282 15685 kubeadm.go:310]
I0918 19:39:00.073401 15685 kubeadm.go:310] Then you can join any number of worker nodes by running the following on each as root:
I0918 19:39:00.073411 15685 kubeadm.go:310]
I0918 19:39:00.073523 15685 kubeadm.go:310] kubeadm join control-plane.minikube.internal:8443 --token hdrfj8.iw5e001tr9gn1zt0 \
I0918 19:39:00.073680 15685 kubeadm.go:310] --discovery-token-ca-cert-hash sha256:895a1a4b821247c3b3bc3d8674fdaf9ae4007075fb12e882893c62d1438babd8
I0918 19:39:00.073695 15685 cni.go:84] Creating CNI manager for ""
I0918 19:39:00.073716 15685 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge
I0918 19:39:00.075247 15685 out.go:177] * Configuring bridge CNI (Container Networking Interface) ...
I0918 19:39:00.076352 15685 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d
I0918 19:39:00.084706 15685 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes)
I0918 19:39:00.100640 15685 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj"
I0918 19:39:00.100700 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:00.100704 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes addons-457129 minikube.k8s.io/updated_at=2024_09_18T19_39_00_0700 minikube.k8s.io/version=v1.34.0 minikube.k8s.io/commit=85073601a832bd4bbda5d11fa91feafff6ec6b91 minikube.k8s.io/name=addons-457129 minikube.k8s.io/primary=true
I0918 19:39:00.209837 15685 ops.go:34] apiserver oom_adj: -16
I0918 19:39:00.209978 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:00.710131 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:01.210809 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:01.710745 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:02.210820 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:02.710989 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:03.210887 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:03.710018 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:04.210129 15685 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.31.1/kubectl get sa default --kubeconfig=/var/lib/minikube/kubeconfig
I0918 19:39:04.272501 15685 kubeadm.go:1113] duration metric: took 4.17185943s to wait for elevateKubeSystemPrivileges
I0918 19:39:04.272541 15685 kubeadm.go:394] duration metric: took 13.079743528s to StartCluster
I0918 19:39:04.272560 15685 settings.go:142] acquiring lock: {Name:mk761415fdfe0253120f9b1dbb6bb2fd172fca68 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:39:04.272688 15685 settings.go:150] Updating kubeconfig: /home/jenkins/minikube-integration/19667-7499/kubeconfig
I0918 19:39:04.273162 15685 lock.go:35] WriteFile acquiring /home/jenkins/minikube-integration/19667-7499/kubeconfig: {Name:mk31083525ef7f1419e6532910512baf7d24e908 Clock:{} Delay:500ms Timeout:1m0s Cancel:<nil>}
I0918 19:39:04.273389 15685 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml"
I0918 19:39:04.273418 15685 start.go:235] Will wait 6m0s for node &{Name: IP:192.168.49.2 Port:8443 KubernetesVersion:v1.31.1 ContainerRuntime:docker ControlPlane:true Worker:true}
I0918 19:39:04.273474 15685 addons.go:507] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:true csi-hostpath-driver:true dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:true gvisor:false headlamp:false helm-tiller:true inaccel:false ingress:true ingress-dns:true inspektor-gadget:true istio:false istio-provisioner:false kong:false kubeflow:false kubevirt:false logviewer:false metallb:false metrics-server:true nvidia-device-plugin:true nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:true registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false storage-provisioner-rancher:true volcano:true volumesnapshots:true yakd:true]
I0918 19:39:04.273590 15685 addons.go:69] Setting yakd=true in profile "addons-457129"
I0918 19:39:04.273595 15685 addons.go:69] Setting gcp-auth=true in profile "addons-457129"
I0918 19:39:04.273617 15685 addons.go:69] Setting cloud-spanner=true in profile "addons-457129"
I0918 19:39:04.273628 15685 addons.go:69] Setting storage-provisioner=true in profile "addons-457129"
I0918 19:39:04.273632 15685 addons.go:234] Setting addon cloud-spanner=true in "addons-457129"
I0918 19:39:04.273639 15685 mustload.go:65] Loading cluster: addons-457129
I0918 19:39:04.273647 15685 addons.go:69] Setting storage-provisioner-rancher=true in profile "addons-457129"
I0918 19:39:04.273635 15685 addons.go:69] Setting nvidia-device-plugin=true in profile "addons-457129"
I0918 19:39:04.273658 15685 config.go:182] Loaded profile config "addons-457129": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0918 19:39:04.273666 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.273671 15685 addons.go:69] Setting volcano=true in profile "addons-457129"
I0918 19:39:04.273682 15685 addons.go:69] Setting ingress=true in profile "addons-457129"
I0918 19:39:04.273688 15685 addons.go:234] Setting addon volcano=true in "addons-457129"
I0918 19:39:04.273662 15685 addons_storage_classes.go:33] enableOrDisableStorageClasses storage-provisioner-rancher=true on "addons-457129"
I0918 19:39:04.273706 15685 addons.go:69] Setting inspektor-gadget=true in profile "addons-457129"
I0918 19:39:04.273721 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.273728 15685 addons.go:69] Setting metrics-server=true in profile "addons-457129"
I0918 19:39:04.273711 15685 addons.go:69] Setting default-storageclass=true in profile "addons-457129"
I0918 19:39:04.273738 15685 addons.go:234] Setting addon metrics-server=true in "addons-457129"
I0918 19:39:04.273762 15685 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "addons-457129"
I0918 19:39:04.273770 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.273833 15685 config.go:182] Loaded profile config "addons-457129": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.31.1
I0918 19:39:04.274030 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.274034 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.274074 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.274184 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.274202 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.274232 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.273672 15685 addons.go:69] Setting helm-tiller=true in profile "addons-457129"
I0918 19:39:04.274401 15685 addons.go:234] Setting addon helm-tiller=true in "addons-457129"
I0918 19:39:04.274442 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.273692 15685 addons.go:234] Setting addon ingress=true in "addons-457129"
I0918 19:39:04.274519 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.273640 15685 addons.go:234] Setting addon storage-provisioner=true in "addons-457129"
I0918 19:39:04.274666 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.274910 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.273611 15685 addons.go:234] Setting addon yakd=true in "addons-457129"
I0918 19:39:04.275014 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.275017 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.273607 15685 addons.go:69] Setting csi-hostpath-driver=true in profile "addons-457129"
I0918 19:39:04.275307 15685 addons.go:234] Setting addon csi-hostpath-driver=true in "addons-457129"
I0918 19:39:04.275364 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.273620 15685 addons.go:69] Setting registry=true in profile "addons-457129"
I0918 19:39:04.275423 15685 addons.go:69] Setting volumesnapshots=true in profile "addons-457129"
I0918 19:39:04.273700 15685 addons.go:69] Setting ingress-dns=true in profile "addons-457129"
I0918 19:39:04.275579 15685 addons.go:234] Setting addon ingress-dns=true in "addons-457129"
I0918 19:39:04.275663 15685 addons.go:234] Setting addon registry=true in "addons-457129"
I0918 19:39:04.275691 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.275800 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.273664 15685 addons.go:234] Setting addon nvidia-device-plugin=true in "addons-457129"
I0918 19:39:04.275895 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.276155 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.276347 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.276418 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.305442 15685 out.go:177] * Verifying Kubernetes components...
I0918 19:39:04.305740 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.275466 15685 addons.go:234] Setting addon volumesnapshots=true in "addons-457129"
I0918 19:39:04.306731 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.307319 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.273721 15685 addons.go:234] Setting addon inspektor-gadget=true in "addons-457129"
I0918 19:39:04.307506 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.307826 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.310809 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.311995 15685 ssh_runner.go:195] Run: sudo systemctl daemon-reload
I0918 19:39:04.313080 15685 addons.go:234] Setting addon storage-provisioner-rancher=true in "addons-457129"
I0918 19:39:04.313146 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.313696 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.318322 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.326224 15685 out.go:177] - Using image ghcr.io/helm/tiller:v2.17.0
I0918 19:39:04.328163 15685 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-dp.yaml
I0918 19:39:04.328186 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/helm-tiller-dp.yaml (2422 bytes)
I0918 19:39:04.328252 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.328741 15685 out.go:177] - Using image gcr.io/k8s-minikube/kube-registry-proxy:0.0.6
I0918 19:39:04.330198 15685 addons.go:234] Setting addon default-storageclass=true in "addons-457129"
I0918 19:39:04.330237 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.330799 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:04.335513 15685 out.go:177] - Using image docker.io/registry:2.8.3
I0918 19:39:04.335673 15685 out.go:177] - Using image docker.io/marcnuri/yakd:0.0.5
I0918 19:39:04.337163 15685 out.go:177] - Using image gcr.io/cloud-spanner-emulator/emulator:1.5.23
I0918 19:39:04.337228 15685 addons.go:431] installing /etc/kubernetes/addons/yakd-ns.yaml
I0918 19:39:04.337240 15685 ssh_runner.go:362] scp yakd/yakd-ns.yaml --> /etc/kubernetes/addons/yakd-ns.yaml (171 bytes)
I0918 19:39:04.337242 15685 addons.go:431] installing /etc/kubernetes/addons/registry-rc.yaml
I0918 19:39:04.337256 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-rc.yaml (860 bytes)
I0918 19:39:04.337303 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.337322 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.339036 15685 out.go:177] - Using image docker.io/volcanosh/vc-webhook-manager:v1.9.0
I0918 19:39:04.339282 15685 addons.go:431] installing /etc/kubernetes/addons/deployment.yaml
I0918 19:39:04.339298 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/deployment.yaml (1004 bytes)
I0918 19:39:04.339355 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.342053 15685 out.go:177] - Using image docker.io/volcanosh/vc-controller-manager:v1.9.0
I0918 19:39:04.344416 15685 out.go:177] - Using image docker.io/volcanosh/vc-scheduler:v1.9.0
I0918 19:39:04.349102 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:04.350742 15685 out.go:177] - Using image registry.k8s.io/ingress-nginx/controller:v1.11.2
I0918 19:39:04.352177 15685 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0918 19:39:04.353507 15685 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0918 19:39:04.355285 15685 addons.go:431] installing /etc/kubernetes/addons/ingress-deploy.yaml
I0918 19:39:04.355317 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-deploy.yaml (16078 bytes)
I0918 19:39:04.355372 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.356757 15685 addons.go:431] installing /etc/kubernetes/addons/volcano-deployment.yaml
I0918 19:39:04.356786 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volcano-deployment.yaml (434001 bytes)
I0918 19:39:04.356849 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.364567 15685 out.go:177] - Using image registry.k8s.io/metrics-server/metrics-server:v0.7.2
I0918 19:39:04.365885 15685 out.go:177] - Using image registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
I0918 19:39:04.365914 15685 out.go:177] - Using image registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.0
I0918 19:39:04.366023 15685 addons.go:431] installing /etc/kubernetes/addons/metrics-apiservice.yaml
I0918 19:39:04.366036 15685 ssh_runner.go:362] scp metrics-server/metrics-apiservice.yaml --> /etc/kubernetes/addons/metrics-apiservice.yaml (424 bytes)
I0918 19:39:04.366112 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.367623 15685 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml
I0918 19:39:04.367644 15685 ssh_runner.go:362] scp volumesnapshots/csi-hostpath-snapshotclass.yaml --> /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml (934 bytes)
I0918 19:39:04.367724 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.367849 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
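Note: the repeated "docker container inspect -f ..." calls with the {{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}} template are how minikube discovers which host port Docker mapped to the node container's SSH port; the value it finds is what the sshutil dials below use (127.0.0.1:32768). Run standalone, the equivalent lookup would be (illustration only):

    docker container inspect -f '{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' addons-457129
    # prints the host port bound to 22/tcp inside the addons-457129 container, e.g. 32768
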
I0918 19:39:04.369798 15685 out.go:177] - Using image docker.io/rancher/local-path-provisioner:v0.0.22
I0918 19:39:04.370513 15685 out.go:177] - Using image registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
I0918 19:39:04.372672 15685 out.go:177] - Using image docker.io/busybox:stable
I0918 19:39:04.372733 15685 out.go:177] - Using image registry.k8s.io/sig-storage/livenessprobe:v2.8.0
I0918 19:39:04.375162 15685 out.go:177] - Using image gcr.io/k8s-minikube/storage-provisioner:v5
I0918 19:39:04.375343 15685 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0918 19:39:04.375366 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner-rancher.yaml (3113 bytes)
I0918 19:39:04.375467 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.376622 15685 addons.go:431] installing /etc/kubernetes/addons/storage-provisioner.yaml
I0918 19:39:04.376652 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes)
I0918 19:39:04.376708 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.376882 15685 out.go:177] - Using image registry.k8s.io/sig-storage/csi-resizer:v1.6.0
I0918 19:39:04.378271 15685 out.go:177] - Using image registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
I0918 19:39:04.398582 15685 addons.go:431] installing /etc/kubernetes/addons/storageclass.yaml
I0918 19:39:04.398605 15685 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes)
I0918 19:39:04.398658 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.399399 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.400566 15685 out.go:177] - Using image ghcr.io/inspektor-gadget/inspektor-gadget:v0.32.0
I0918 19:39:04.401268 15685 out.go:177] - Using image nvcr.io/nvidia/k8s-device-plugin:v0.16.2
I0918 19:39:04.401381 15685 out.go:177] - Using image registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
I0918 19:39:04.401718 15685 out.go:177] - Using image gcr.io/k8s-minikube/minikube-ingress-dns:0.0.3
I0918 19:39:04.401724 15685 addons.go:431] installing /etc/kubernetes/addons/ig-namespace.yaml
I0918 19:39:04.401815 15685 ssh_runner.go:362] scp inspektor-gadget/ig-namespace.yaml --> /etc/kubernetes/addons/ig-namespace.yaml (55 bytes)
I0918 19:39:04.401883 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.403383 15685 addons.go:431] installing /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0918 19:39:04.403397 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/nvidia-device-plugin.yaml (1966 bytes)
I0918 19:39:04.403439 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.403386 15685 addons.go:431] installing /etc/kubernetes/addons/ingress-dns-pod.yaml
I0918 19:39:04.403479 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ingress-dns-pod.yaml (2442 bytes)
I0918 19:39:04.403525 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.408977 15685 out.go:177] - Using image registry.k8s.io/sig-storage/csi-attacher:v4.0.0
I0918 19:39:04.411423 15685 out.go:177] - Using image registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
I0918 19:39:04.415139 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-external-attacher.yaml
I0918 19:39:04.415165 15685 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-attacher.yaml --> /etc/kubernetes/addons/rbac-external-attacher.yaml (3073 bytes)
I0918 19:39:04.415233 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:04.416092 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.416241 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.421652 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.437036 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.438471 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.452306 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.457051 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.458221 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.458505 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.460264 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.465918 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.467617 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:04.468330 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
W0918 19:39:04.506023 15685 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0918 19:39:04.506062 15685 retry.go:31] will retry after 353.295676ms: ssh: handshake failed: EOF
W0918 19:39:04.506180 15685 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0918 19:39:04.506192 15685 retry.go:31] will retry after 209.760713ms: ssh: handshake failed: EOF
W0918 19:39:04.506271 15685 sshutil.go:64] dial failure (will retry): ssh: handshake failed: EOF
I0918 19:39:04.506278 15685 retry.go:31] will retry after 236.682396ms: ssh: handshake failed: EOF
I0918 19:39:04.523778 15685 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -"
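The command above rewrites the cluster's CoreDNS ConfigMap so that host.minikube.internal resolves to the host-side gateway address: the first sed expression inserts a hosts block (192.168.49.1 host.minikube.internal, with fallthrough) ahead of the forward plugin, and the second enables the log plugin ahead of errors. A sketch of how to inspect the result by hand, assuming the standard "Corefile" data key, would be:

    kubectl --context addons-457129 -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'
    # after the edit the output should contain, among the stock plugins:
    #    hosts {
    #       192.168.49.1 host.minikube.internal
    #       fallthrough
    #    }
    #    forward . /etc/resolv.conf
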
I0918 19:39:04.705792 15685 ssh_runner.go:195] Run: sudo systemctl start kubelet
I0918 19:39:04.710358 15685 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-rbac.yaml
I0918 19:39:04.710436 15685 ssh_runner.go:362] scp helm-tiller/helm-tiller-rbac.yaml --> /etc/kubernetes/addons/helm-tiller-rbac.yaml (1188 bytes)
I0918 19:39:04.912763 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml
I0918 19:39:04.923074 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml
I0918 19:39:05.006330 15685 addons.go:431] installing /etc/kubernetes/addons/helm-tiller-svc.yaml
I0918 19:39:05.006360 15685 ssh_runner.go:362] scp helm-tiller/helm-tiller-svc.yaml --> /etc/kubernetes/addons/helm-tiller-svc.yaml (951 bytes)
I0918 19:39:05.016987 15685 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml
I0918 19:39:05.017014 15685 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml (6471 bytes)
I0918 19:39:05.022209 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml
I0918 19:39:05.107830 15685 addons.go:431] installing /etc/kubernetes/addons/yakd-sa.yaml
I0918 19:39:05.107914 15685 ssh_runner.go:362] scp yakd/yakd-sa.yaml --> /etc/kubernetes/addons/yakd-sa.yaml (247 bytes)
I0918 19:39:05.110900 15685 addons.go:431] installing /etc/kubernetes/addons/registry-svc.yaml
I0918 19:39:05.110981 15685 ssh_runner.go:362] scp registry/registry-svc.yaml --> /etc/kubernetes/addons/registry-svc.yaml (398 bytes)
I0918 19:39:05.113168 15685 addons.go:431] installing /etc/kubernetes/addons/metrics-server-deployment.yaml
I0918 19:39:05.113234 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/metrics-server-deployment.yaml (1907 bytes)
I0918 19:39:05.123987 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml
I0918 19:39:05.207086 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml
I0918 19:39:05.209653 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml
I0918 19:39:05.209905 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml
I0918 19:39:05.212920 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml
I0918 19:39:05.306930 15685 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml
I0918 19:39:05.306970 15685 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml (23126 bytes)
I0918 19:39:05.313728 15685 addons.go:431] installing /etc/kubernetes/addons/registry-proxy.yaml
I0918 19:39:05.313771 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/registry-proxy.yaml (947 bytes)
I0918 19:39:05.406833 15685 addons.go:431] installing /etc/kubernetes/addons/yakd-crb.yaml
I0918 19:39:05.406861 15685 ssh_runner.go:362] scp yakd/yakd-crb.yaml --> /etc/kubernetes/addons/yakd-crb.yaml (422 bytes)
I0918 19:39:05.507324 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml
I0918 19:39:05.607586 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-hostpath.yaml
I0918 19:39:05.607680 15685 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-hostpath.yaml --> /etc/kubernetes/addons/rbac-hostpath.yaml (4266 bytes)
I0918 19:39:05.706835 15685 addons.go:431] installing /etc/kubernetes/addons/ig-serviceaccount.yaml
I0918 19:39:05.706942 15685 ssh_runner.go:362] scp inspektor-gadget/ig-serviceaccount.yaml --> /etc/kubernetes/addons/ig-serviceaccount.yaml (80 bytes)
I0918 19:39:05.712418 15685 addons.go:431] installing /etc/kubernetes/addons/yakd-svc.yaml
I0918 19:39:05.712516 15685 ssh_runner.go:362] scp yakd/yakd-svc.yaml --> /etc/kubernetes/addons/yakd-svc.yaml (412 bytes)
I0918 19:39:05.728315 15685 addons.go:431] installing /etc/kubernetes/addons/metrics-server-rbac.yaml
I0918 19:39:05.728408 15685 ssh_runner.go:362] scp metrics-server/metrics-server-rbac.yaml --> /etc/kubernetes/addons/metrics-server-rbac.yaml (2175 bytes)
I0918 19:39:05.809605 15685 addons.go:431] installing /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml
I0918 19:39:05.809695 15685 ssh_runner.go:362] scp volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml --> /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml (19582 bytes)
I0918 19:39:06.122239 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml
I0918 19:39:06.220550 15685 addons.go:431] installing /etc/kubernetes/addons/metrics-server-service.yaml
I0918 19:39:06.220631 15685 ssh_runner.go:362] scp metrics-server/metrics-server-service.yaml --> /etc/kubernetes/addons/metrics-server-service.yaml (446 bytes)
I0918 19:39:06.405935 15685 addons.go:431] installing /etc/kubernetes/addons/yakd-dp.yaml
I0918 19:39:06.406019 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/yakd-dp.yaml (2017 bytes)
I0918 19:39:06.424161 15685 ssh_runner.go:235] Completed: sudo systemctl start kubelet: (1.718276825s)
I0918 19:39:06.424291 15685 ssh_runner.go:235] Completed: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.49.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.31.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -": (1.900315277s)
I0918 19:39:06.424453 15685 start.go:971] {"host.minikube.internal": 192.168.49.1} host record injected into CoreDNS's ConfigMap
I0918 19:39:06.426516 15685 node_ready.go:35] waiting up to 6m0s for node "addons-457129" to be "Ready" ...
I0918 19:39:06.508222 15685 node_ready.go:49] node "addons-457129" has status "Ready":"True"
I0918 19:39:06.508315 15685 node_ready.go:38] duration metric: took 81.629275ms for node "addons-457129" to be "Ready" ...
I0918 19:39:06.508343 15685 pod_ready.go:36] extra waiting up to 6m0s for all system-critical pods including labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0918 19:39:06.510213 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml
I0918 19:39:06.510294 15685 ssh_runner.go:362] scp volumesnapshots/rbac-volume-snapshot-controller.yaml --> /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml (3545 bytes)
I0918 19:39:06.518146 15685 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace to be "Ready" ...
I0918 19:39:06.621412 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml
I0918 19:39:06.621453 15685 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-health-monitor-controller.yaml --> /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml (3038 bytes)
I0918 19:39:06.807006 15685 addons.go:431] installing /etc/kubernetes/addons/ig-role.yaml
I0918 19:39:06.807034 15685 ssh_runner.go:362] scp inspektor-gadget/ig-role.yaml --> /etc/kubernetes/addons/ig-role.yaml (210 bytes)
I0918 19:39:06.807069 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml
I0918 19:39:06.817505 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml
I0918 19:39:07.008255 15685 kapi.go:214] "coredns" deployment in "kube-system" namespace and "addons-457129" context rescaled to 1 replicas
I0918 19:39:07.214237 15685 addons.go:431] installing /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0918 19:39:07.214264 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml (1475 bytes)
I0918 19:39:07.316208 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/deployment.yaml: (2.403397567s)
I0918 19:39:07.316350 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml: (2.393246592s)
I0918 19:39:07.416928 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-external-provisioner.yaml
I0918 19:39:07.417013 15685 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-provisioner.yaml --> /etc/kubernetes/addons/rbac-external-provisioner.yaml (4442 bytes)
I0918 19:39:07.515403 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0918 19:39:07.715841 15685 addons.go:431] installing /etc/kubernetes/addons/ig-rolebinding.yaml
I0918 19:39:07.715935 15685 ssh_runner.go:362] scp inspektor-gadget/ig-rolebinding.yaml --> /etc/kubernetes/addons/ig-rolebinding.yaml (244 bytes)
I0918 19:39:08.223508 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-external-resizer.yaml
I0918 19:39:08.223590 15685 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-resizer.yaml --> /etc/kubernetes/addons/rbac-external-resizer.yaml (2943 bytes)
I0918 19:39:08.525231 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:08.619872 15685 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrole.yaml
I0918 19:39:08.619945 15685 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrole.yaml --> /etc/kubernetes/addons/ig-clusterrole.yaml (1485 bytes)
I0918 19:39:08.728794 15685 addons.go:431] installing /etc/kubernetes/addons/rbac-external-snapshotter.yaml
I0918 19:39:08.728864 15685 ssh_runner.go:362] scp csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml --> /etc/kubernetes/addons/rbac-external-snapshotter.yaml (3149 bytes)
I0918 19:39:08.919976 15685 addons.go:431] installing /etc/kubernetes/addons/ig-clusterrolebinding.yaml
I0918 19:39:08.920005 15685 ssh_runner.go:362] scp inspektor-gadget/ig-clusterrolebinding.yaml --> /etc/kubernetes/addons/ig-clusterrolebinding.yaml (274 bytes)
I0918 19:39:09.423030 15685 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-attacher.yaml
I0918 19:39:09.423069 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-attacher.yaml (2143 bytes)
I0918 19:39:09.605870 15685 addons.go:431] installing /etc/kubernetes/addons/ig-crd.yaml
I0918 19:39:09.605896 15685 ssh_runner.go:362] scp inspektor-gadget/ig-crd.yaml --> /etc/kubernetes/addons/ig-crd.yaml (5216 bytes)
I0918 19:39:10.023112 15685 addons.go:431] installing /etc/kubernetes/addons/ig-daemonset.yaml
I0918 19:39:10.023137 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/ig-daemonset.yaml (7735 bytes)
I0918 19:39:10.127434 15685 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml
I0918 19:39:10.127474 15685 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml --> /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml (1274 bytes)
I0918 19:39:10.515195 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml
I0918 19:39:10.526322 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:10.529651 15685 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-plugin.yaml
I0918 19:39:10.529735 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-plugin.yaml (8201 bytes)
I0918 19:39:10.812803 15685 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-resizer.yaml
I0918 19:39:10.812881 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/csi-hostpath-resizer.yaml (2191 bytes)
I0918 19:39:11.324698 15685 addons.go:431] installing /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0918 19:39:11.324809 15685 ssh_runner.go:362] scp csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml --> /etc/kubernetes/addons/csi-hostpath-storageclass.yaml (846 bytes)
I0918 19:39:11.413540 15685 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_application_credentials.json (162 bytes)
I0918 19:39:11.413649 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:11.439200 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:11.711862 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml
I0918 19:39:12.210568 15685 ssh_runner.go:362] scp memory --> /var/lib/minikube/google_cloud_project (12 bytes)
I0918 19:39:12.512583 15685 addons.go:234] Setting addon gcp-auth=true in "addons-457129"
I0918 19:39:12.512663 15685 host.go:66] Checking if "addons-457129" exists ...
I0918 19:39:12.513245 15685 cli_runner.go:164] Run: docker container inspect addons-457129 --format={{.State.Status}}
I0918 19:39:12.537731 15685 ssh_runner.go:195] Run: cat /var/lib/minikube/google_application_credentials.json
I0918 19:39:12.537789 15685 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" addons-457129
I0918 19:39:12.556026 15685 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32768 SSHKeyPath:/home/jenkins/minikube-integration/19667-7499/.minikube/machines/addons-457129/id_rsa Username:docker}
I0918 19:39:12.607001 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:13.415984 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-deploy.yaml: (8.393673352s)
I0918 19:39:13.416247 15685 addons.go:475] Verifying addon ingress=true in "addons-457129"
I0918 19:39:13.416282 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ingress-dns-pod.yaml: (8.209088607s)
I0918 19:39:13.416199 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner-rancher.yaml: (8.292177688s)
I0918 19:39:13.418627 15685 out.go:177] * Verifying ingress addon...
I0918 19:39:13.421193 15685 kapi.go:75] Waiting for pod with label "app.kubernetes.io/name=ingress-nginx" in ns "ingress-nginx" ...
I0918 19:39:13.428294 15685 kapi.go:86] Found 3 Pods for label selector app.kubernetes.io/name=ingress-nginx
I0918 19:39:13.428368 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:13.930065 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:14.425664 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:14.931258 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:15.107756 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:15.427306 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:16.023482 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:16.428409 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:16.926010 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/volcano-deployment.yaml: (11.716048111s)
I0918 19:39:16.926143 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/nvidia-device-plugin.yaml: (11.71640398s)
I0918 19:39:16.926251 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/helm-tiller-dp.yaml -f /etc/kubernetes/addons/helm-tiller-rbac.yaml -f /etc/kubernetes/addons/helm-tiller-svc.yaml: (11.713245576s)
I0918 19:39:16.926338 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml: (11.41892158s)
I0918 19:39:16.926410 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/registry-rc.yaml -f /etc/kubernetes/addons/registry-svc.yaml -f /etc/kubernetes/addons/registry-proxy.yaml: (10.804080734s)
I0918 19:39:16.926448 15685 addons.go:475] Verifying addon registry=true in "addons-457129"
I0918 19:39:16.926958 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/yakd-ns.yaml -f /etc/kubernetes/addons/yakd-sa.yaml -f /etc/kubernetes/addons/yakd-crb.yaml -f /etc/kubernetes/addons/yakd-svc.yaml -f /etc/kubernetes/addons/yakd-dp.yaml: (10.109368338s)
I0918 19:39:16.927150 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (9.411707352s)
W0918 19:39:16.927176 15685 addons.go:457] apply failed, will retry: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
I0918 19:39:16.927195 15685 retry.go:31] will retry after 167.016922ms: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: Process exited with status 1
stdout:
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
serviceaccount/snapshot-controller created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
deployment.apps/snapshot-controller created
stderr:
error: resource mapping not found for name: "csi-hostpath-snapclass" namespace: "" from "/etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml": no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
ensure CRDs are installed first
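This failure is an ordering race rather than a broken manifest: csi-hostpath-snapshotclass.yaml defines a VolumeSnapshotClass, but it is applied in the same batch as the CRDs that introduce that kind, and the API server has not finished establishing the new snapshot.storage.k8s.io CRDs when the mapping lookup runs. minikube simply retries; the re-run with --force at 19:39:17 completes about two seconds later, once the CRDs are registered. Outside this retry loop, one way to avoid the race, shown only as an illustration, is to apply the CRDs first and wait for them to be Established before applying anything that uses them:

    kubectl apply -f snapshot.storage.k8s.io_volumesnapshotclasses.yaml
    kubectl wait --for condition=established --timeout=60s \
        crd/volumesnapshotclasses.snapshot.storage.k8s.io
    kubectl apply -f csi-hostpath-snapshotclass.yaml
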
I0918 19:39:16.927276 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/ig-namespace.yaml -f /etc/kubernetes/addons/ig-serviceaccount.yaml -f /etc/kubernetes/addons/ig-role.yaml -f /etc/kubernetes/addons/ig-rolebinding.yaml -f /etc/kubernetes/addons/ig-clusterrole.yaml -f /etc/kubernetes/addons/ig-clusterrolebinding.yaml -f /etc/kubernetes/addons/ig-crd.yaml -f /etc/kubernetes/addons/ig-daemonset.yaml: (6.411993921s)
I0918 19:39:16.927392 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/metrics-apiservice.yaml -f /etc/kubernetes/addons/metrics-server-deployment.yaml -f /etc/kubernetes/addons/metrics-server-rbac.yaml -f /etc/kubernetes/addons/metrics-server-service.yaml: (10.119742326s)
I0918 19:39:16.927438 15685 addons.go:475] Verifying addon metrics-server=true in "addons-457129"
I0918 19:39:16.929137 15685 out.go:177] * To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:
minikube -p addons-457129 service yakd-dashboard -n yakd-dashboard
I0918 19:39:16.929288 15685 out.go:177] * Verifying registry addon...
I0918 19:39:16.930423 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:16.933653 15685 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=registry" in ns "kube-system" ...
I0918 19:39:17.008321 15685 kapi.go:86] Found 2 Pods for label selector kubernetes.io/minikube-addons=registry
I0918 19:39:17.008346 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:17.094766 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml
I0918 19:39:17.426295 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:17.508514 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:17.524483 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:18.009561 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:18.009903 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:18.424871 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:18.524321 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:18.807605 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/rbac-external-attacher.yaml -f /etc/kubernetes/addons/rbac-hostpath.yaml -f /etc/kubernetes/addons/rbac-external-health-monitor-controller.yaml -f /etc/kubernetes/addons/rbac-external-provisioner.yaml -f /etc/kubernetes/addons/rbac-external-resizer.yaml -f /etc/kubernetes/addons/rbac-external-snapshotter.yaml -f /etc/kubernetes/addons/csi-hostpath-attacher.yaml -f /etc/kubernetes/addons/csi-hostpath-driverinfo.yaml -f /etc/kubernetes/addons/csi-hostpath-plugin.yaml -f /etc/kubernetes/addons/csi-hostpath-resizer.yaml -f /etc/kubernetes/addons/csi-hostpath-storageclass.yaml: (7.095684841s)
I0918 19:39:18.807701 15685 addons.go:475] Verifying addon csi-hostpath-driver=true in "addons-457129"
I0918 19:39:18.807750 15685 ssh_runner.go:235] Completed: cat /var/lib/minikube/google_application_credentials.json: (6.269987653s)
I0918 19:39:18.810201 15685 out.go:177] * Verifying csi-hostpath-driver addon...
I0918 19:39:18.810232 15685 out.go:177] - Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.3
I0918 19:39:18.812232 15685 out.go:177] - Using image gcr.io/k8s-minikube/gcp-auth-webhook:v0.1.2
I0918 19:39:18.813160 15685 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ...
I0918 19:39:18.813434 15685 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-ns.yaml
I0918 19:39:18.813458 15685 ssh_runner.go:362] scp gcp-auth/gcp-auth-ns.yaml --> /etc/kubernetes/addons/gcp-auth-ns.yaml (700 bytes)
I0918 19:39:18.817263 15685 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver
I0918 19:39:18.817286 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:18.907910 15685 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-service.yaml
I0918 19:39:18.907939 15685 ssh_runner.go:362] scp gcp-auth/gcp-auth-service.yaml --> /etc/kubernetes/addons/gcp-auth-service.yaml (788 bytes)
I0918 19:39:18.925557 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:18.935812 15685 addons.go:431] installing /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0918 19:39:18.935834 15685 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/gcp-auth-webhook.yaml (5421 bytes)
I0918 19:39:18.937387 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:19.018766 15685 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml
I0918 19:39:19.021044 15685 pod_ready.go:98] error getting pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace (skipping!): pods "coredns-7c65d6cfc9-f5gph" not found
I0918 19:39:19.021075 15685 pod_ready.go:82] duration metric: took 12.502850895s for pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace to be "Ready" ...
E0918 19:39:19.021088 15685 pod_ready.go:67] WaitExtra: waitPodCondition: error getting pod "coredns-7c65d6cfc9-f5gph" in "kube-system" namespace (skipping!): pods "coredns-7c65d6cfc9-f5gph" not found
I0918 19:39:19.021099 15685 pod_ready.go:79] waiting up to 6m0s for pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace to be "Ready" ...
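The switch of target pod here follows from the rescale logged at 19:39:07: with the coredns deployment scaled down to one replica, coredns-7c65d6cfc9-f5gph disappears mid-wait, so the readiness poller moves on to the surviving replica coredns-7c65d6cfc9-qw624. The condition being polled is the pod's Ready status; an equivalent manual check, shown only as an illustration, would be:

    kubectl --context addons-457129 -n kube-system get pod coredns-7c65d6cfc9-qw624 \
        -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
    # prints True once the pod reports Ready
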
I0918 19:39:19.318203 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:19.409657 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply --force -f /etc/kubernetes/addons/csi-hostpath-snapshotclass.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -f /etc/kubernetes/addons/snapshot.storage.k8s.io_volumesnapshots.yaml -f /etc/kubernetes/addons/rbac-volume-snapshot-controller.yaml -f /etc/kubernetes/addons/volume-snapshot-controller-deployment.yaml: (2.31484694s)
I0918 19:39:19.426511 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:19.507769 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:19.818610 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:19.926172 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:19.937333 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:20.306564 15685 ssh_runner.go:235] Completed: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.31.1/kubectl apply -f /etc/kubernetes/addons/gcp-auth-ns.yaml -f /etc/kubernetes/addons/gcp-auth-service.yaml -f /etc/kubernetes/addons/gcp-auth-webhook.yaml: (1.287749389s)
I0918 19:39:20.308945 15685 addons.go:475] Verifying addon gcp-auth=true in "addons-457129"
I0918 19:39:20.310415 15685 out.go:177] * Verifying gcp-auth addon...
I0918 19:39:20.312390 15685 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=gcp-auth" in ns "gcp-auth" ...
I0918 19:39:20.315019 15685 kapi.go:86] Found 0 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0918 19:39:20.317554 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:20.426228 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:20.437231 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:20.817495 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:20.924997 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:20.936803 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:21.026313 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:21.317531 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:21.424476 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:21.436640 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:21.817203 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:21.925032 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:21.937124 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:22.317034 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:22.425160 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:22.437235 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:22.817040 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:22.924655 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:23.024537 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:23.318093 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:23.427994 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:23.436808 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:23.526647 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:23.816802 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:23.925466 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:23.937511 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:24.317324 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:24.425582 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:24.436573 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:24.817205 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:24.925494 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:24.937900 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:25.317144 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:25.425133 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:25.437025 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:25.527293 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:25.817382 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:25.925275 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:25.937182 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:26.317094 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:26.425608 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:26.437916 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:26.817617 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:26.925101 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:26.937198 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:27.318655 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:27.425071 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:27.437106 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:27.527526 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:27.817205 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:27.926288 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:27.937171 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:28.318652 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:28.425158 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:28.437195 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:28.817329 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:28.926297 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:28.937559 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:29.318154 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:29.425861 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:29.437328 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:29.817673 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:29.925397 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:29.937382 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:30.027854 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:30.316832 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:30.425093 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:30.437164 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:30.816465 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:30.925649 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:30.936203 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:31.316732 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:31.425189 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:31.437354 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:31.816819 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:31.925287 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:31.937380 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:32.317206 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:32.425043 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:32.438435 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:32.527772 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:32.817691 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:32.925976 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:32.937410 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:33.317740 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:33.425363 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:33.437714 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:33.818083 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:33.929130 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:33.937750 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:34.317459 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:34.428122 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:34.437496 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:34.527872 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:34.817071 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:34.926238 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:34.937385 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:35.317139 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:35.425793 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:35.437149 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:35.817535 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:35.925504 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:35.937735 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:36.317471 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:36.425742 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:36.436647 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:36.817773 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:36.925088 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:36.937171 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:37.027317 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:37.317426 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:37.425364 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:37.437485 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:37.816966 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:37.925490 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:37.937479 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:38.316930 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:38.425321 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:38.437360 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:38.817352 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:38.925827 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:38.936757 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:39.317011 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:39.425337 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:39.437595 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:39.525937 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:39.816941 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:39.925480 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:39.936520 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:40.316852 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:40.425244 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:40.437321 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:40.817078 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:40.926078 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:41.025376 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:41.317473 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:41.425153 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:41.437288 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:41.526501 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:41.816671 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:41.925135 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:41.937108 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:42.317386 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:42.425515 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:42.436728 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:42.816928 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:42.925364 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:42.937120 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:43.316761 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:43.425077 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:43.437281 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:43.527191 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:43.816672 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:43.925471 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:43.936571 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:44.317585 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:44.425407 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:44.437466 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:44.816775 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:44.925799 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:44.937105 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:45.316935 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:45.425812 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:45.437013 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:45.817190 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:45.926132 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:45.937419 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:46.027638 15685 pod_ready.go:103] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"False"
I0918 19:39:46.317367 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:46.425504 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:46.436392 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:46.881799 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:47.019391 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=registry", current state: Pending: [<nil>]
I0918 19:39:47.019705 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:47.027674 15685 pod_ready.go:93] pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace has status "Ready":"True"
I0918 19:39:47.027698 15685 pod_ready.go:82] duration metric: took 28.006586609s for pod "coredns-7c65d6cfc9-qw624" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.027709 15685 pod_ready.go:79] waiting up to 6m0s for pod "etcd-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.031876 15685 pod_ready.go:93] pod "etcd-addons-457129" in "kube-system" namespace has status "Ready":"True"
I0918 19:39:47.031900 15685 pod_ready.go:82] duration metric: took 4.18421ms for pod "etcd-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.031912 15685 pod_ready.go:79] waiting up to 6m0s for pod "kube-apiserver-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.036257 15685 pod_ready.go:93] pod "kube-apiserver-addons-457129" in "kube-system" namespace has status "Ready":"True"
I0918 19:39:47.036281 15685 pod_ready.go:82] duration metric: took 4.360162ms for pod "kube-apiserver-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.036292 15685 pod_ready.go:79] waiting up to 6m0s for pod "kube-controller-manager-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.040262 15685 pod_ready.go:93] pod "kube-controller-manager-addons-457129" in "kube-system" namespace has status "Ready":"True"
I0918 19:39:47.040282 15685 pod_ready.go:82] duration metric: took 3.9838ms for pod "kube-controller-manager-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.040291 15685 pod_ready.go:79] waiting up to 6m0s for pod "kube-proxy-xk9xc" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.044437 15685 pod_ready.go:93] pod "kube-proxy-xk9xc" in "kube-system" namespace has status "Ready":"True"
I0918 19:39:47.044458 15685 pod_ready.go:82] duration metric: took 4.159459ms for pod "kube-proxy-xk9xc" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.044467 15685 pod_ready.go:79] waiting up to 6m0s for pod "kube-scheduler-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.317677 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:47.425864 15685 pod_ready.go:93] pod "kube-scheduler-addons-457129" in "kube-system" namespace has status "Ready":"True"
I0918 19:39:47.425885 15685 pod_ready.go:82] duration metric: took 381.412434ms for pod "kube-scheduler-addons-457129" in "kube-system" namespace to be "Ready" ...
I0918 19:39:47.425894 15685 pod_ready.go:39] duration metric: took 40.917530291s for extra waiting for all system-critical and pods with labels [k8s-app=kube-dns component=etcd component=kube-apiserver component=kube-controller-manager k8s-app=kube-proxy component=kube-scheduler] to be "Ready" ...
I0918 19:39:47.425913 15685 api_server.go:52] waiting for apiserver process to appear ...
I0918 19:39:47.426007 15685 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.*
I0918 19:39:47.426544 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:47.437483 15685 kapi.go:107] duration metric: took 30.503831942s to wait for kubernetes.io/minikube-addons=registry ...
I0918 19:39:47.443366 15685 api_server.go:72] duration metric: took 43.169909962s to wait for apiserver process to appear ...
I0918 19:39:47.443399 15685 api_server.go:88] waiting for apiserver healthz status ...
I0918 19:39:47.443427 15685 api_server.go:253] Checking apiserver healthz at https://192.168.49.2:8443/healthz ...
I0918 19:39:47.447595 15685 api_server.go:279] https://192.168.49.2:8443/healthz returned 200:
ok
I0918 19:39:47.448386 15685 api_server.go:141] control plane version: v1.31.1
I0918 19:39:47.448410 15685 api_server.go:131] duration metric: took 5.003997ms to wait for apiserver health ...
I0918 19:39:47.448417 15685 system_pods.go:43] waiting for kube-system pods to appear ...
I0918 19:39:47.632187 15685 system_pods.go:59] 18 kube-system pods found
I0918 19:39:47.632228 15685 system_pods.go:61] "coredns-7c65d6cfc9-qw624" [e4f7b79e-6ade-4491-924e-34e66190e129] Running
I0918 19:39:47.632240 15685 system_pods.go:61] "csi-hostpath-attacher-0" [5df964db-0e1a-4ab2-8c1b-08ccefda59c8] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0918 19:39:47.632253 15685 system_pods.go:61] "csi-hostpath-resizer-0" [8d31632d-9428-40c2-bd75-48370ba9df30] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0918 19:39:47.632268 15685 system_pods.go:61] "csi-hostpathplugin-gx9ps" [1b4aca2b-c7e0-4fd0-9635-1f3a31317460] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0918 19:39:47.632279 15685 system_pods.go:61] "etcd-addons-457129" [e8af1cd4-abf2-4101-885a-5b00aef361d5] Running
I0918 19:39:47.632289 15685 system_pods.go:61] "kube-apiserver-addons-457129" [96968776-c3eb-43ce-9ade-bcb3e2f1eae2] Running
I0918 19:39:47.632294 15685 system_pods.go:61] "kube-controller-manager-addons-457129" [de5540b1-56e3-4596-b224-772b8a326f2b] Running
I0918 19:39:47.632303 15685 system_pods.go:61] "kube-ingress-dns-minikube" [2a05d45a-a3e8-4472-b55d-3611ae4fae74] Running
I0918 19:39:47.632308 15685 system_pods.go:61] "kube-proxy-xk9xc" [684cdba6-dbb9-49b9-aeb9-718120abba98] Running
I0918 19:39:47.632315 15685 system_pods.go:61] "kube-scheduler-addons-457129" [7358eba4-20af-43f9-abb7-0f241b859124] Running
I0918 19:39:47.632321 15685 system_pods.go:61] "metrics-server-84c5f94fbc-h4fl2" [a77387ba-6450-4ffe-9aa7-de0bf96f74da] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0918 19:39:47.632327 15685 system_pods.go:61] "nvidia-device-plugin-daemonset-5p5wt" [b9ee4eb2-3471-4a3f-83b8-cd8dabafe83c] Running
I0918 19:39:47.632331 15685 system_pods.go:61] "registry-66c9cd494c-kh4r9" [b2944da3-d9b7-4de7-8a57-f934ec8b2970] Running
I0918 19:39:47.632338 15685 system_pods.go:61] "registry-proxy-cnkhj" [e15cccd8-7fcb-48c9-9dc2-e79744e87759] Running
I0918 19:39:47.632345 15685 system_pods.go:61] "snapshot-controller-56fcc65765-pw2mr" [17a077f4-27f7-4a22-95e4-47ed4cc7f9e3] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0918 19:39:47.632362 15685 system_pods.go:61] "snapshot-controller-56fcc65765-s87zp" [4de05312-3887-4578-b327-e1d6e6ead173] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0918 19:39:47.632371 15685 system_pods.go:61] "storage-provisioner" [c887feeb-fb7b-495f-a43c-fca8ac121bca] Running
I0918 19:39:47.632378 15685 system_pods.go:61] "tiller-deploy-b48cc5f79-k64kf" [b79b5bb4-d211-4aa6-9551-5d2305acc2b2] Running
I0918 19:39:47.632390 15685 system_pods.go:74] duration metric: took 183.965128ms to wait for pod list to return data ...
I0918 19:39:47.632403 15685 default_sa.go:34] waiting for default service account to be created ...
I0918 19:39:47.817809 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:47.824845 15685 default_sa.go:45] found service account: "default"
I0918 19:39:47.824867 15685 default_sa.go:55] duration metric: took 192.456396ms for default service account to be created ...
I0918 19:39:47.824877 15685 system_pods.go:116] waiting for k8s-apps to be running ...
I0918 19:39:47.925564 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:48.030275 15685 system_pods.go:86] 18 kube-system pods found
I0918 19:39:48.030303 15685 system_pods.go:89] "coredns-7c65d6cfc9-qw624" [e4f7b79e-6ade-4491-924e-34e66190e129] Running
I0918 19:39:48.030312 15685 system_pods.go:89] "csi-hostpath-attacher-0" [5df964db-0e1a-4ab2-8c1b-08ccefda59c8] Pending / Ready:ContainersNotReady (containers with unready status: [csi-attacher]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-attacher])
I0918 19:39:48.030318 15685 system_pods.go:89] "csi-hostpath-resizer-0" [8d31632d-9428-40c2-bd75-48370ba9df30] Pending / Ready:ContainersNotReady (containers with unready status: [csi-resizer]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-resizer])
I0918 19:39:48.030327 15685 system_pods.go:89] "csi-hostpathplugin-gx9ps" [1b4aca2b-c7e0-4fd0-9635-1f3a31317460] Pending / Ready:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter]) / ContainersReady:ContainersNotReady (containers with unready status: [csi-external-health-monitor-controller node-driver-registrar hostpath liveness-probe csi-provisioner csi-snapshotter])
I0918 19:39:48.030332 15685 system_pods.go:89] "etcd-addons-457129" [e8af1cd4-abf2-4101-885a-5b00aef361d5] Running
I0918 19:39:48.030336 15685 system_pods.go:89] "kube-apiserver-addons-457129" [96968776-c3eb-43ce-9ade-bcb3e2f1eae2] Running
I0918 19:39:48.030339 15685 system_pods.go:89] "kube-controller-manager-addons-457129" [de5540b1-56e3-4596-b224-772b8a326f2b] Running
I0918 19:39:48.030343 15685 system_pods.go:89] "kube-ingress-dns-minikube" [2a05d45a-a3e8-4472-b55d-3611ae4fae74] Running
I0918 19:39:48.030346 15685 system_pods.go:89] "kube-proxy-xk9xc" [684cdba6-dbb9-49b9-aeb9-718120abba98] Running
I0918 19:39:48.030350 15685 system_pods.go:89] "kube-scheduler-addons-457129" [7358eba4-20af-43f9-abb7-0f241b859124] Running
I0918 19:39:48.030355 15685 system_pods.go:89] "metrics-server-84c5f94fbc-h4fl2" [a77387ba-6450-4ffe-9aa7-de0bf96f74da] Pending / Ready:ContainersNotReady (containers with unready status: [metrics-server]) / ContainersReady:ContainersNotReady (containers with unready status: [metrics-server])
I0918 19:39:48.030359 15685 system_pods.go:89] "nvidia-device-plugin-daemonset-5p5wt" [b9ee4eb2-3471-4a3f-83b8-cd8dabafe83c] Running
I0918 19:39:48.030362 15685 system_pods.go:89] "registry-66c9cd494c-kh4r9" [b2944da3-d9b7-4de7-8a57-f934ec8b2970] Running
I0918 19:39:48.030385 15685 system_pods.go:89] "registry-proxy-cnkhj" [e15cccd8-7fcb-48c9-9dc2-e79744e87759] Running
I0918 19:39:48.030391 15685 system_pods.go:89] "snapshot-controller-56fcc65765-pw2mr" [17a077f4-27f7-4a22-95e4-47ed4cc7f9e3] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0918 19:39:48.030396 15685 system_pods.go:89] "snapshot-controller-56fcc65765-s87zp" [4de05312-3887-4578-b327-e1d6e6ead173] Pending / Ready:ContainersNotReady (containers with unready status: [volume-snapshot-controller]) / ContainersReady:ContainersNotReady (containers with unready status: [volume-snapshot-controller])
I0918 19:39:48.030400 15685 system_pods.go:89] "storage-provisioner" [c887feeb-fb7b-495f-a43c-fca8ac121bca] Running
I0918 19:39:48.030403 15685 system_pods.go:89] "tiller-deploy-b48cc5f79-k64kf" [b79b5bb4-d211-4aa6-9551-5d2305acc2b2] Running
I0918 19:39:48.030409 15685 system_pods.go:126] duration metric: took 205.526569ms to wait for k8s-apps to be running ...
I0918 19:39:48.030416 15685 system_svc.go:44] waiting for kubelet service to be running ....
I0918 19:39:48.030461 15685 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet
I0918 19:39:48.041327 15685 system_svc.go:56] duration metric: took 10.90412ms WaitForService to wait for kubelet
I0918 19:39:48.041351 15685 kubeadm.go:582] duration metric: took 43.767903151s to wait for: map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true]
I0918 19:39:48.041367 15685 node_conditions.go:102] verifying NodePressure condition ...
I0918 19:39:48.225997 15685 node_conditions.go:122] node storage ephemeral capacity is 304681132Ki
I0918 19:39:48.226028 15685 node_conditions.go:123] node cpu capacity is 8
I0918 19:39:48.226042 15685 node_conditions.go:105] duration metric: took 184.671252ms to run NodePressure ...
I0918 19:39:48.226056 15685 start.go:241] waiting for startup goroutines ...
I0918 19:39:48.317203 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:48.425562 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:48.817286 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:48.924980 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:49.316991 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:49.425323 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:49.817665 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:49.924742 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:50.318618 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:50.425298 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:50.817542 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:50.925352 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:51.317018 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:51.425675 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:51.816946 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:51.925199 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:52.317239 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:52.425530 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:52.816484 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:52.925016 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:53.316973 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:53.425628 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:53.817865 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:53.925988 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:54.317636 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:54.425033 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:54.817592 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:54.925317 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:55.380950 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:55.425274 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:55.817122 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:55.924881 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:56.317556 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:56.425864 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:56.817160 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:56.924904 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:57.317782 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:57.432705 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:57.817594 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:57.925185 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:58.318573 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:58.425384 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:58.816692 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:58.925131 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:59.317649 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:59.424864 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:39:59.817330 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:39:59.925175 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:00.318533 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:00.426047 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:00.817610 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:00.925393 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:01.317644 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:01.533477 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:01.817259 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:01.925179 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:02.317218 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:02.424606 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:02.817535 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:02.924515 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:03.317297 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:03.425708 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:03.829461 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:03.925603 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:04.317114 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:04.426122 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:04.817257 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:04.925302 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:05.317618 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:05.424810 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:05.817341 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:05.925280 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:06.317224 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:06.425929 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:06.817152 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:06.938628 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:07.317285 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:07.425142 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:07.817602 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:07.925504 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:08.317005 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:08.425844 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:08.817563 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:08.925337 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:09.317234 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:09.425239 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:09.817376 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:09.925306 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:10.317584 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:10.425565 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:10.819798 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:10.925798 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:11.317949 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:11.426105 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:11.817355 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:11.925595 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:12.317885 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:12.425890 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:12.817513 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:12.924454 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:13.317910 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:13.425731 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:13.817213 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:13.925753 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:14.317811 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:14.425976 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:14.817753 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:14.924849 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:15.317302 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:15.479102 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:15.817018 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=csi-hostpath-driver", current state: Pending: [<nil>]
I0918 19:40:15.925731 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:16.317281 15685 kapi.go:107] duration metric: took 57.504122489s to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ...
I0918 19:40:16.424712 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:16.924965 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:17.425548 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:17.925212 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:18.426717 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:18.925591 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:19.503186 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:19.924727 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:20.425927 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:20.925302 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:21.425991 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:21.931214 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:22.426406 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:22.924766 15685 kapi.go:96] waiting for pod "app.kubernetes.io/name=ingress-nginx", current state: Pending: [<nil>]
I0918 19:40:23.425220 15685 kapi.go:107] duration metric: took 1m10.00402563s to wait for app.kubernetes.io/name=ingress-nginx ...
I0918 19:40:42.815384 15685 kapi.go:86] Found 1 Pods for label selector kubernetes.io/minikube-addons=gcp-auth
I0918 19:40:42.815407 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:43.315513 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:43.815813 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:44.316000 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:44.815324 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:45.315381 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:45.815191 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:46.316743 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:46.815902 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:47.315953 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:47.816244 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:48.316671 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:48.815666 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:49.315391 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:49.815536 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:50.316433 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:50.815545 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:51.315413 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:51.815737 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:52.315862 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:52.815862 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:53.316021 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:53.815588 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:54.315555 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:54.815614 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:55.315364 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:55.815307 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:56.315608 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:56.815100 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:57.316029 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:57.815688 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:58.315609 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:58.815742 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:59.315603 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:40:59.815310 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:00.315614 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:00.815718 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:01.315639 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:01.815989 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:02.316119 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:02.816382 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:03.315233 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:03.815663 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:04.315942 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:04.816235 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:05.316136 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:05.816100 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:06.316659 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:06.815284 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:07.316220 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:07.815164 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:08.316272 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:08.815474 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:09.315726 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:09.815341 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:10.315573 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:10.815585 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:11.315252 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:11.816158 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:12.316521 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:12.816065 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:13.315955 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:13.815509 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:14.315915 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:14.815993 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:15.315954 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:15.815742 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:16.316098 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:16.816275 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:17.315893 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:17.815674 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:18.315932 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:18.816375 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:19.315194 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:19.816150 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:20.316170 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:20.816715 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:21.315420 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:21.815266 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:22.316663 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:22.817524 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:23.315257 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:23.816378 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:24.315147 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:24.815934 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:25.315975 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:25.815907 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:26.315517 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:26.815814 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:27.315429 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:27.815261 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:28.316209 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:28.815449 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:29.315573 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:29.815204 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:30.316451 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:30.815758 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:31.315478 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:31.816325 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:32.315808 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:32.815784 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:33.316156 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:33.815596 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:34.316116 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:34.815730 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:35.315689 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:35.815064 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:36.316136 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:36.816087 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:37.315626 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:37.815431 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:38.315843 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:38.816029 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:39.316158 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:39.816171 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:40.316278 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:40.816766 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:41.315707 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:41.815639 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:42.315965 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:42.815864 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:43.316108 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:43.815576 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:44.315817 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:44.815754 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:45.315576 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:45.815324 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:46.315654 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:46.816001 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:47.315417 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:47.815141 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:48.316283 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:48.815591 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:49.315257 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:49.815653 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:50.315567 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:50.815629 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:51.347189 15685 kapi.go:96] waiting for pod "kubernetes.io/minikube-addons=gcp-auth", current state: Pending: [<nil>]
I0918 19:41:51.816224 15685 kapi.go:107] duration metric: took 2m31.503832805s to wait for kubernetes.io/minikube-addons=gcp-auth ...
I0918 19:41:51.818182 15685 out.go:177] * Your GCP credentials will now be mounted into every pod created in the addons-457129 cluster.
I0918 19:41:51.819682 15685 out.go:177] * If you don't want your credentials mounted into a specific pod, add a label with the `gcp-auth-skip-secret` key to your pod configuration.
I0918 19:41:51.821239 15685 out.go:177] * If you want existing pods to be mounted with credentials, either recreate them or rerun addons enable with --refresh.
I0918 19:41:51.822700 15685 out.go:177] * Enabled addons: cloud-spanner, default-storageclass, ingress-dns, storage-provisioner-rancher, volcano, nvidia-device-plugin, helm-tiller, storage-provisioner, inspektor-gadget, metrics-server, yakd, volumesnapshots, registry, csi-hostpath-driver, ingress, gcp-auth
I0918 19:41:51.824006 15685 addons.go:510] duration metric: took 2m47.550538049s for enable addons: enabled=[cloud-spanner default-storageclass ingress-dns storage-provisioner-rancher volcano nvidia-device-plugin helm-tiller storage-provisioner inspektor-gadget metrics-server yakd volumesnapshots registry csi-hostpath-driver ingress gcp-auth]
I0918 19:41:51.824052 15685 start.go:246] waiting for cluster config update ...
I0918 19:41:51.824085 15685 start.go:255] writing updated cluster config ...
I0918 19:41:51.824346 15685 ssh_runner.go:195] Run: rm -f paused
I0918 19:41:51.873628 15685 start.go:600] kubectl: 1.31.1, cluster: 1.31.1 (minor skew: 0)
I0918 19:41:51.875457 15685 out.go:177] * Done! kubectl is now configured to use "addons-457129" cluster and "default" namespace by default
==> Docker <==
Sep 18 19:51:24 addons-457129 dockerd[1341]: time="2024-09-18T19:51:24.707589739Z" level=info msg="ignoring event" container=5e8bc0e951e4a4091e6df05ae23b8858bb442b1149f1d65f656be6e8f55161d1 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:24 addons-457129 dockerd[1341]: time="2024-09-18T19:51:24.740829174Z" level=info msg="ignoring event" container=401004fdf6d8cea80dd8d820bf26f23c4343202d6b894026dc05f0d2d571035d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:25 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:25Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/2d946c75d6e81ec3b1e42ff327e03b8814897aa2196fa0b88cf77ebba88a9924/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 18 19:51:27 addons-457129 dockerd[1341]: time="2024-09-18T19:51:27.406559505Z" level=info msg="Container failed to exit within 30s of signal 15 - using the force" container=259f7e08b743ef82b7bcc42c5e95352ff0569be51f44ef09e620ef17be4d95eb
Sep 18 19:51:27 addons-457129 dockerd[1341]: time="2024-09-18T19:51:27.434397754Z" level=info msg="ignoring event" container=259f7e08b743ef82b7bcc42c5e95352ff0569be51f44ef09e620ef17be4d95eb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:27 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:27Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"local-path-provisioner-86d989889c-4bk8b_local-path-storage\": unexpected command output nsenter: cannot open /proc/4059/ns/net: No such file or directory\n with error: exit status 1"
Sep 18 19:51:27 addons-457129 dockerd[1341]: time="2024-09-18T19:51:27.645652950Z" level=info msg="ignoring event" container=70e3de0610cb123de7a8049fd83a80f3bac5e5f3f0463ab93b65e72ac59dafe0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:27 addons-457129 dockerd[1341]: time="2024-09-18T19:51:27.930917542Z" level=info msg="ignoring event" container=41ac19e47011d188367b8bfdae02623a4ecde0e107c309891e9b46be24c37425 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:28 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:28Z" level=info msg="Stop pulling image docker.io/nginx:alpine: Status: Downloaded newer image for nginx:alpine"
Sep 18 19:51:28 addons-457129 dockerd[1341]: time="2024-09-18T19:51:28.983186430Z" level=info msg="Attempting next endpoint for pull after error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed"
Sep 18 19:51:28 addons-457129 dockerd[1341]: time="2024-09-18T19:51:28.985554417Z" level=error msg="Handler for POST /v1.43/images/create returned error: Head \"https://gcr.io/v2/k8s-minikube/busybox/manifests/latest\": unauthorized: authentication failed"
Sep 18 19:51:36 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:36Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ff558cf262610bc15ebe8a8a6232c203b18cbbcff2334120a5bbe8ffa6bd51d5/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local europe-west1-b.c.k8s-minikube.internal c.k8s-minikube.internal google.internal options ndots:5]"
Sep 18 19:51:37 addons-457129 dockerd[1341]: time="2024-09-18T19:51:37.293622066Z" level=info msg="ignoring event" container=b322925f6b0825948d970e7f820a5df93e1fc6490b13db0a3dcfe0d1702c304d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:37 addons-457129 dockerd[1341]: time="2024-09-18T19:51:37.342124932Z" level=info msg="ignoring event" container=ab4cf8bc40d313df7fb7cca524a351535409277f536ab063ef4d2596935460c2 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:38 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:38Z" level=info msg="Stop pulling image docker.io/kicbase/echo-server:1.0: Status: Downloaded newer image for kicbase/echo-server:1.0"
Sep 18 19:51:41 addons-457129 dockerd[1341]: time="2024-09-18T19:51:41.564963998Z" level=info msg="Container failed to exit within 2s of signal 15 - using the force" container=df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960
Sep 18 19:51:41 addons-457129 dockerd[1341]: time="2024-09-18T19:51:41.630372382Z" level=info msg="ignoring event" container=df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:41 addons-457129 dockerd[1341]: time="2024-09-18T19:51:41.766086673Z" level=info msg="ignoring event" container=6b593e84835c84e72fa379fb1ccddd2d5bfdf994aaf91012f54137a21da458fe module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:46 addons-457129 dockerd[1341]: time="2024-09-18T19:51:46.472872015Z" level=info msg="ignoring event" container=0f8d0d72f3c85439a030c543a9e8ce244636ee3d0aed98955322c358c79714d0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:46 addons-457129 dockerd[1341]: time="2024-09-18T19:51:46.954959018Z" level=info msg="ignoring event" container=4c486a2ba62d5383cdd72143b513fdb49214075aa93a3bd189fb34a1beaa9db0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:47 addons-457129 dockerd[1341]: time="2024-09-18T19:51:47.019137879Z" level=info msg="ignoring event" container=5470624e82176db770c949f73a99c574ae9a1480198b8a9af84163161198b0e0 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:47 addons-457129 dockerd[1341]: time="2024-09-18T19:51:47.140728783Z" level=info msg="ignoring event" container=771592a28b29e4c807fdd12471c05d9498f9c2f847f1ad3a6f3869d3be91b168 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
Sep 18 19:51:47 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:47Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"registry-66c9cd494c-kh4r9_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 18 19:51:47 addons-457129 cri-dockerd[1605]: time="2024-09-18T19:51:47Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"registry-proxy-cnkhj_kube-system\": unexpected command output Device \"eth0\" does not exist.\n with error: exit status 1"
Sep 18 19:51:47 addons-457129 dockerd[1341]: time="2024-09-18T19:51:47.210491602Z" level=info msg="ignoring event" container=29ac5773e878d1951d033f111e09a98ec346a63ef47f1ca9b3a5c5c3366c708b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete"
==> container status <==
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD
b196be8aab02a kicbase/echo-server@sha256:127ac38a2bb9537b7f252addff209ea6801edcac8a92c8b1104dacd66a583ed6 9 seconds ago Running hello-world-app 0 ff558cf262610 hello-world-app-55bf9c44b4-vdlrw
308a947d4f51a nginx@sha256:a5127daff3d6f4606be3100a252419bfa84fd6ee5cd74d0feaca1a5068f97dcf 19 seconds ago Running nginx 0 2d946c75d6e81 nginx
647589918e7d3 a416a98b71e22 50 seconds ago Exited helper-pod 0 cc6036ab2cf89 helper-pod-delete-pvc-6c5c3a13-dc76-4ea5-ae23-b00403f48891
281e449b44a46 gcr.io/k8s-minikube/gcp-auth-webhook@sha256:e6c5b3bc32072ea370d34c27836efd11b3519d25bd444c2a8efc339cff0e20fb 9 minutes ago Running gcp-auth 0 867a8bdf3438b gcp-auth-89d5ffd79-svg79
2b15be079b6e6 ce263a8653f9c 11 minutes ago Exited patch 1 72fc6c00fc7b1 ingress-nginx-admission-patch-j8ftn
0b315ce69998d registry.k8s.io/ingress-nginx/kube-webhook-certgen@sha256:a320a50cc91bd15fd2d6fa6de58bd98c1bd64b9a6f926ce23a600d87043455a3 11 minutes ago Exited create 0 c6c2cc5bc26e9 ingress-nginx-admission-create-xtm5n
5470624e82176 gcr.io/k8s-minikube/kube-registry-proxy@sha256:b3fa0b2df8737fdb85ad5918a7e2652527463e357afff83a5e5bb966bcedc367 12 minutes ago Exited registry-proxy 0 29ac5773e878d registry-proxy-cnkhj
4c486a2ba62d5 registry@sha256:ac0192b549007e22998eb74e8d8488dcfe70f1489520c3b144a6047ac5efbe90 12 minutes ago Exited registry 0 771592a28b29e registry-66c9cd494c-kh4r9
2158d332c2787 6e38f40d628db 12 minutes ago Running storage-provisioner 0 862e35b78e5db storage-provisioner
520375bbab8c8 c69fa2e9cbf5f 12 minutes ago Running coredns 0 f7ce44fe4c3d7 coredns-7c65d6cfc9-qw624
48554c8d2b4c3 60c005f310ff3 12 minutes ago Running kube-proxy 0 8ec769d31087d kube-proxy-xk9xc
b53b2c10175a3 175ffd71cce3d 12 minutes ago Running kube-controller-manager 0 8839cc09d838e kube-controller-manager-addons-457129
90ef02d186701 9aa1fad941575 12 minutes ago Running kube-scheduler 0 826160e2b63a0 kube-scheduler-addons-457129
706c4839867d6 2e96e5913fc06 12 minutes ago Running etcd 0 ad2ee4620ea4a etcd-addons-457129
9a08629dca4d5 6bab7719df100 12 minutes ago Running kube-apiserver 0 e1ac926cf3599 kube-apiserver-addons-457129
==> coredns [520375bbab8c] <==
[INFO] 10.244.0.22:53549 - 39122 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.066222524s
[INFO] 10.244.0.22:55262 - 43737 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.066317318s
[INFO] 10.244.0.22:41193 - 26256 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00007456s
[INFO] 10.244.0.22:48521 - 5664 "AAAA IN hello-world-app.default.svc.cluster.local.c.k8s-minikube.internal. udp 83 false 512" NXDOMAIN qr,rd,ra 83 0.005061844s
[INFO] 10.244.0.22:59634 - 54805 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005947442s
[INFO] 10.244.0.22:38386 - 64945 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006511268s
[INFO] 10.244.0.22:41454 - 59725 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006191497s
[INFO] 10.244.0.22:35656 - 63415 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006348565s
[INFO] 10.244.0.22:53549 - 11700 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006280856s
[INFO] 10.244.0.22:55262 - 47043 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.006324048s
[INFO] 10.244.0.22:48521 - 14502 "A IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004489403s
[INFO] 10.244.0.22:53549 - 46889 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005352785s
[INFO] 10.244.0.22:35656 - 3931 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004623446s
[INFO] 10.244.0.22:48521 - 14716 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.001996261s
[INFO] 10.244.0.22:41454 - 40135 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004213724s
[INFO] 10.244.0.22:59634 - 23446 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005434644s
[INFO] 10.244.0.22:53549 - 30411 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000078176s
[INFO] 10.244.0.22:38386 - 49884 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.005692955s
[INFO] 10.244.0.22:41454 - 57892 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.00010086s
[INFO] 10.244.0.22:35656 - 48392 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000059121s
[INFO] 10.244.0.22:48521 - 38680 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000064209s
[INFO] 10.244.0.22:38386 - 31313 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000040068s
[INFO] 10.244.0.22:59634 - 33788 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000069708s
[INFO] 10.244.0.22:55262 - 43125 "AAAA IN hello-world-app.default.svc.cluster.local.google.internal. udp 75 false 512" NXDOMAIN qr,rd,ra 75 0.004802538s
[INFO] 10.244.0.22:55262 - 12964 "A IN hello-world-app.default.svc.cluster.local. udp 59 false 512" NOERROR qr,aa,rd 116 0.000076087s
==> describe nodes <==
Name: addons-457129
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=addons-457129
kubernetes.io/os=linux
minikube.k8s.io/commit=85073601a832bd4bbda5d11fa91feafff6ec6b91
minikube.k8s.io/name=addons-457129
minikube.k8s.io/primary=true
minikube.k8s.io/updated_at=2024_09_18T19_39_00_0700
minikube.k8s.io/version=v1.34.0
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
topology.hostpath.csi/node=addons-457129
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/cri-dockerd.sock
node.alpha.kubernetes.io/ttl: 0
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Wed, 18 Sep 2024 19:38:56 +0000
Taints: <none>
Unschedulable: false
Lease:
HolderIdentity: addons-457129
AcquireTime: <unset>
RenewTime: Wed, 18 Sep 2024 19:51:44 +0000
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                      Message
  ----             ------  -----------------                 ------------------                ------                      -------
  MemoryPressure   False   Wed, 18 Sep 2024 19:51:33 +0000   Wed, 18 Sep 2024 19:38:55 +0000   KubeletHasSufficientMemory  kubelet has sufficient memory available
  DiskPressure     False   Wed, 18 Sep 2024 19:51:33 +0000   Wed, 18 Sep 2024 19:38:55 +0000   KubeletHasNoDiskPressure    kubelet has no disk pressure
  PIDPressure      False   Wed, 18 Sep 2024 19:51:33 +0000   Wed, 18 Sep 2024 19:38:55 +0000   KubeletHasSufficientPID     kubelet has sufficient PID available
  Ready            True    Wed, 18 Sep 2024 19:51:33 +0000   Wed, 18 Sep 2024 19:38:57 +0000   KubeletReady                kubelet is posting ready status
Addresses:
InternalIP: 192.168.49.2
Hostname: addons-457129
Capacity:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859320Ki
pods: 110
Allocatable:
cpu: 8
ephemeral-storage: 304681132Ki
hugepages-1Gi: 0
hugepages-2Mi: 0
memory: 32859320Ki
pods: 110
System Info:
Machine ID: 672a2153f81848b0b6aa92e77d87fdcb
System UUID: a546e2b2-4d4f-424e-84c6-7e85755f65c3
Boot ID: d3463f46-6a21-414a-b4ed-44cb759d1998
Kernel Version: 5.15.0-1069-gcp
OS Image: Ubuntu 22.04.5 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://27.2.1
Kubelet Version: v1.31.1
Kube-Proxy Version: v1.31.1
PodCIDR: 10.244.0.0/24
PodCIDRs: 10.244.0.0/24
Non-terminated Pods: (11 in total)
  Namespace     Name                                     CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------     ----                                     ------------  ----------  ---------------  -------------  ---
  default       busybox                                  0 (0%)        0 (0%)      0 (0%)           0 (0%)         9m15s
  default       hello-world-app-55bf9c44b4-vdlrw         0 (0%)        0 (0%)      0 (0%)           0 (0%)         11s
  default       nginx                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         22s
  gcp-auth      gcp-auth-89d5ffd79-svg79                 0 (0%)        0 (0%)      0 (0%)           0 (0%)         11m
  kube-system   coredns-7c65d6cfc9-qw624                 100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     12m
  kube-system   etcd-addons-457129                       100m (1%)     0 (0%)      100Mi (0%)       0 (0%)         12m
  kube-system   kube-apiserver-addons-457129             250m (3%)     0 (0%)      0 (0%)           0 (0%)         12m
  kube-system   kube-controller-manager-addons-457129    200m (2%)     0 (0%)      0 (0%)           0 (0%)         12m
  kube-system   kube-proxy-xk9xc                         0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
  kube-system   kube-scheduler-addons-457129             100m (1%)     0 (0%)      0 (0%)           0 (0%)         12m
  kube-system   storage-provisioner                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                750m (9%)   0 (0%)
  memory             170Mi (0%)  170Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type     Reason                   Age                From             Message
  ----     ------                   ----               ----             -------
  Normal   Starting                 12m                kube-proxy
  Normal   NodeAllocatableEnforced  12m                kubelet          Updated Node Allocatable limit across pods
  Warning  CgroupV1                 12m                kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
  Normal   NodeHasSufficientMemory  12m (x8 over 12m)  kubelet          Node addons-457129 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    12m (x7 over 12m)  kubelet          Node addons-457129 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     12m (x7 over 12m)  kubelet          Node addons-457129 status is now: NodeHasSufficientPID
  Normal   Starting                 12m                kubelet          Starting kubelet.
  Normal   Starting                 12m                kubelet          Starting kubelet.
  Warning  CgroupV1                 12m                kubelet          Cgroup v1 support is in maintenance mode, please migrate to Cgroup v2.
  Normal   NodeAllocatableEnforced  12m                kubelet          Updated Node Allocatable limit across pods
  Normal   NodeHasSufficientMemory  12m                kubelet          Node addons-457129 status is now: NodeHasSufficientMemory
  Normal   NodeHasNoDiskPressure    12m                kubelet          Node addons-457129 status is now: NodeHasNoDiskPressure
  Normal   NodeHasSufficientPID     12m                kubelet          Node addons-457129 status is now: NodeHasSufficientPID
  Normal   RegisteredNode           12m                node-controller  Node addons-457129 event: Registered Node addons-457129 in Controller
==> dmesg <==
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff ba 78 8b 1e 9b 05 08 06
[ +2.987014] IPv4: martian source 10.244.0.1 from 10.244.0.18, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 96 40 e6 e6 d8 8a 08 06
[ +6.085908] IPv4: martian source 10.244.0.1 from 10.244.0.21, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 0a 2e de 0a 23 fd 08 06
[ +0.162722] IPv4: martian source 10.244.0.1 from 10.244.0.20, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff 0e 64 95 17 7d b3 08 06
[ +0.187511] IPv4: martian source 10.244.0.1 from 10.244.0.19, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 6a 88 c3 ed 54 5f 08 06
[ +8.959933] IPv4: martian source 10.244.0.1 from 10.244.0.22, on dev eth0
[ +0.000005] ll header: 00000000: ff ff ff ff ff ff 3a c9 dc 3e c3 0e 08 06
[Sep18 19:41] IPv4: martian source 10.244.0.1 from 10.244.0.24, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff e6 bd b0 c3 ad b7 08 06
[ +0.082010] IPv4: martian source 10.244.0.1 from 10.244.0.25, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 22 de 04 87 a8 70 08 06
[ +27.408104] IPv4: martian source 10.244.0.1 from 10.244.0.26, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 82 a0 9f 7a 90 e3 08 06
[ +0.000449] IPv4: martian source 10.244.0.26 from 10.244.0.3, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 0a 3c 9f 25 06 d5 08 06
[Sep18 19:50] IPv4: martian source 10.244.0.1 from 10.244.0.29, on dev eth0
[ +0.000008] ll header: 00000000: ff ff ff ff ff ff f2 b6 62 f6 bb 32 08 06
[Sep18 19:51] IPv4: martian source 10.244.0.37 from 10.244.0.22, on dev eth0
[ +0.000006] ll header: 00000000: ff ff ff ff ff ff 3a c9 dc 3e c3 0e 08 06
[ +1.743433] IPv4: martian source 10.244.0.22 from 10.244.0.3, on dev eth0
[ +0.000007] ll header: 00000000: ff ff ff ff ff ff 0a 3c 9f 25 06 d5 08 06
==> etcd [706c4839867d] <==
{"level":"info","ts":"2024-09-18T19:38:55.810610Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became pre-candidate at term 1"}
{"level":"info","ts":"2024-09-18T19:38:55.810649Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgPreVoteResp from aec36adc501070cc at term 1"}
{"level":"info","ts":"2024-09-18T19:38:55.810668Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became candidate at term 2"}
{"level":"info","ts":"2024-09-18T19:38:55.810674Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc received MsgVoteResp from aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-18T19:38:55.810682Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"aec36adc501070cc became leader at term 2"}
{"level":"info","ts":"2024-09-18T19:38:55.810689Z","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: aec36adc501070cc elected leader aec36adc501070cc at term 2"}
{"level":"info","ts":"2024-09-18T19:38:55.811748Z","caller":"etcdserver/server.go:2118","msg":"published local member to cluster through raft","local-member-id":"aec36adc501070cc","local-member-attributes":"{Name:addons-457129 ClientURLs:[https://192.168.49.2:2379]}","request-path":"/0/members/aec36adc501070cc/attributes","cluster-id":"fa54960ea34d58be","publish-timeout":"7s"}
{"level":"info","ts":"2024-09-18T19:38:55.811807Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-18T19:38:55.811830Z","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
{"level":"info","ts":"2024-09-18T19:38:55.811906Z","caller":"etcdserver/server.go:2629","msg":"setting up initial cluster version using v2 API","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-18T19:38:55.811949Z","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
{"level":"info","ts":"2024-09-18T19:38:55.811966Z","caller":"etcdmain/main.go:50","msg":"successfully notified init daemon"}
{"level":"info","ts":"2024-09-18T19:38:55.812673Z","caller":"membership/cluster.go:584","msg":"set initial cluster version","cluster-id":"fa54960ea34d58be","local-member-id":"aec36adc501070cc","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-18T19:38:55.812713Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-18T19:38:55.812763Z","caller":"api/capability.go:75","msg":"enabled capabilities for version","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-18T19:38:55.812832Z","caller":"etcdserver/server.go:2653","msg":"cluster version is updated","cluster-version":"3.5"}
{"level":"info","ts":"2024-09-18T19:38:55.812763Z","caller":"v3rpc/health.go:61","msg":"grpc service status changed","service":"","status":"SERVING"}
{"level":"info","ts":"2024-09-18T19:38:55.814187Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"127.0.0.1:2379"}
{"level":"info","ts":"2024-09-18T19:38:55.814264Z","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"192.168.49.2:2379"}
{"level":"info","ts":"2024-09-18T19:39:09.910437Z","caller":"traceutil/trace.go:171","msg":"trace[1408545621] transaction","detail":"{read_only:false; response_revision:542; number_of_response:1; }","duration":"103.285543ms","start":"2024-09-18T19:39:09.807118Z","end":"2024-09-18T19:39:09.910403Z","steps":["trace[1408545621] 'process raft request' (duration: 99.869111ms)"],"step_count":1}
{"level":"warn","ts":"2024-09-18T19:40:01.530764Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"108.008815ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/pods\" limit:1 ","response":"range_response_count:0 size:5"}
{"level":"info","ts":"2024-09-18T19:40:01.530840Z","caller":"traceutil/trace.go:171","msg":"trace[893106699] range","detail":"{range_begin:/registry/pods; range_end:; response_count:0; response_revision:1177; }","duration":"108.107721ms","start":"2024-09-18T19:40:01.422721Z","end":"2024-09-18T19:40:01.530829Z","steps":["trace[893106699] 'range keys from in-memory index tree' (duration: 107.946532ms)"],"step_count":1}
{"level":"info","ts":"2024-09-18T19:48:55.833077Z","caller":"mvcc/index.go:214","msg":"compact tree index","revision":1913}
{"level":"info","ts":"2024-09-18T19:48:55.859495Z","caller":"mvcc/kvstore_compaction.go:69","msg":"finished scheduled compaction","compact-revision":1913,"took":"25.892845ms","hash":684421458,"current-db-size-bytes":9154560,"current-db-size":"9.2 MB","current-db-size-in-use-bytes":5021696,"current-db-size-in-use":"5.0 MB"}
{"level":"info","ts":"2024-09-18T19:48:55.859543Z","caller":"mvcc/hash.go:137","msg":"storing new hash","hash":684421458,"revision":1913,"compact-revision":-1}
==> gcp-auth [281e449b44a4] <==
2024/09/18 19:42:32 Ready to write response ...
2024/09/18 19:50:40 Ready to marshal response ...
2024/09/18 19:50:40 Ready to write response ...
2024/09/18 19:50:46 Ready to marshal response ...
2024/09/18 19:50:46 Ready to write response ...
2024/09/18 19:50:46 Ready to marshal response ...
2024/09/18 19:50:46 Ready to write response ...
2024/09/18 19:50:46 Ready to marshal response ...
2024/09/18 19:50:46 Ready to write response ...
2024/09/18 19:50:54 Ready to marshal response ...
2024/09/18 19:50:54 Ready to write response ...
2024/09/18 19:50:56 Ready to marshal response ...
2024/09/18 19:50:56 Ready to write response ...
2024/09/18 19:50:58 Ready to marshal response ...
2024/09/18 19:50:58 Ready to write response ...
2024/09/18 19:50:58 Ready to marshal response ...
2024/09/18 19:50:58 Ready to write response ...
2024/09/18 19:50:58 Ready to marshal response ...
2024/09/18 19:50:58 Ready to write response ...
2024/09/18 19:51:07 Ready to marshal response ...
2024/09/18 19:51:07 Ready to write response ...
2024/09/18 19:51:25 Ready to marshal response ...
2024/09/18 19:51:25 Ready to write response ...
2024/09/18 19:51:36 Ready to marshal response ...
2024/09/18 19:51:36 Ready to write response ...
==> kernel <==
19:51:48 up 34 min, 0 users, load average: 1.00, 0.64, 0.51
Linux addons-457129 5.15.0-1069-gcp #77~20.04.1-Ubuntu SMP Sun Sep 1 19:39:16 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux
PRETTY_NAME="Ubuntu 22.04.5 LTS"
==> kube-apiserver [9a08629dca4d] <==
W0918 19:42:24.524204 1 cacher.go:171] Terminating all watchers from cacher queues.scheduling.volcano.sh
W0918 19:42:24.629252 1 cacher.go:171] Terminating all watchers from cacher jobs.batch.volcano.sh
W0918 19:42:24.933501 1 cacher.go:171] Terminating all watchers from cacher jobflows.flow.volcano.sh
W0918 19:42:25.282794 1 cacher.go:171] Terminating all watchers from cacher jobtemplates.flow.volcano.sh
I0918 19:50:58.026545 1 alloc.go:330] "allocated clusterIPs" service="headlamp/headlamp" clusterIPs={"IPv4":"10.103.109.71"}
I0918 19:51:03.574284 1 controller.go:615] quota admission added evaluator for: volumesnapshots.snapshot.storage.k8s.io
E0918 19:51:12.552213 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, serviceaccounts \"local-path-provisioner-service-account\" not found]"
I0918 19:51:24.387811 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0918 19:51:24.387861 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0918 19:51:24.401226 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0918 19:51:24.401280 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0918 19:51:24.402784 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0918 19:51:24.402827 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0918 19:51:24.412390 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0918 19:51:24.412455 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0918 19:51:24.424279 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1 to ResourceManager
I0918 19:51:24.424321 1 handler.go:286] Adding GroupVersion snapshot.storage.k8s.io v1beta1 to ResourceManager
I0918 19:51:24.888866 1 controller.go:615] quota admission added evaluator for: ingresses.networking.k8s.io
I0918 19:51:25.055030 1 alloc.go:330] "allocated clusterIPs" service="default/nginx" clusterIPs={"IPv4":"10.105.100.245"}
W0918 19:51:25.403908 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotclasses.snapshot.storage.k8s.io
W0918 19:51:25.425291 1 cacher.go:171] Terminating all watchers from cacher volumesnapshotcontents.snapshot.storage.k8s.io
W0918 19:51:25.435129 1 cacher.go:171] Terminating all watchers from cacher volumesnapshots.snapshot.storage.k8s.io
I0918 19:51:27.878485 1 handler.go:286] Adding GroupVersion gadget.kinvolk.io v1alpha1 to ResourceManager
W0918 19:51:28.893800 1 cacher.go:171] Terminating all watchers from cacher traces.gadget.kinvolk.io
I0918 19:51:36.613342 1 alloc.go:330] "allocated clusterIPs" service="default/hello-world-app" clusterIPs={"IPv4":"10.100.3.253"}
==> kube-controller-manager [b53b2c10175a] <==
I0918 19:51:34.336690 1 shared_informer.go:320] Caches are synced for garbage collector
I0918 19:51:36.440441 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="14.444326ms"
I0918 19:51:36.445572 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="5.081273ms"
I0918 19:51:36.445656 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="45.09µs"
I0918 19:51:36.451833 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="39.869µs"
W0918 19:51:37.215415 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0918 19:51:37.215455 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0918 19:51:37.984504 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="gadget"
I0918 19:51:38.540849 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-create" delay="0s"
I0918 19:51:38.542428 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="ingress-nginx/ingress-nginx-controller-bc57996ff" duration="9.087µs"
I0918 19:51:38.545043 1 job_controller.go:568] "enqueueing job" logger="job-controller" key="ingress-nginx/ingress-nginx-admission-patch" delay="0s"
I0918 19:51:39.995096 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="5.057838ms"
I0918 19:51:39.995190 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="default/hello-world-app-55bf9c44b4" duration="52.278µs"
W0918 19:51:41.090120 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0918 19:51:41.090159 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0918 19:51:42.474152 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0918 19:51:42.474197 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0918 19:51:42.525760 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0918 19:51:42.525803 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0918 19:51:43.753618 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0918 19:51:43.753665 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0918 19:51:44.114022 1 reflector.go:561] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0918 19:51:44.114078 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0918 19:51:44.902343 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="local-path-storage"
I0918 19:51:46.919526 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/registry-66c9cd494c" duration="9.094µs"
==> kube-proxy [48554c8d2b4c] <==
I0918 19:39:08.029487 1 server_linux.go:66] "Using iptables proxy"
I0918 19:39:08.517511 1 server.go:677] "Successfully retrieved node IP(s)" IPs=["192.168.49.2"]
E0918 19:39:08.517580 1 server.go:234] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"
I0918 19:39:09.016073 1 server.go:243] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4"
I0918 19:39:09.016153 1 server_linux.go:169] "Using iptables Proxier"
I0918 19:39:09.116193 1 proxier.go:255] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4"
I0918 19:39:09.116939 1 server.go:483] "Version info" version="v1.31.1"
I0918 19:39:09.116967 1 server.go:485] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0918 19:39:09.207346 1 config.go:199] "Starting service config controller"
I0918 19:39:09.207416 1 shared_informer.go:313] Waiting for caches to sync for service config
I0918 19:39:09.207464 1 config.go:105] "Starting endpoint slice config controller"
I0918 19:39:09.207469 1 shared_informer.go:313] Waiting for caches to sync for endpoint slice config
I0918 19:39:09.311125 1 config.go:328] "Starting node config controller"
I0918 19:39:09.311166 1 shared_informer.go:313] Waiting for caches to sync for node config
I0918 19:39:09.412936 1 shared_informer.go:320] Caches are synced for node config
I0918 19:39:09.417216 1 shared_informer.go:320] Caches are synced for endpoint slice config
I0918 19:39:09.507916 1 shared_informer.go:320] Caches are synced for service config
==> kube-scheduler [90ef02d18670] <==
W0918 19:38:56.824797 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope
W0918 19:38:56.824802 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0918 19:38:56.824814 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User \"system:kube-scheduler\" cannot list resource \"namespaces\" in API group \"\" at the cluster scope" logger="UnhandledError"
E0918 19:38:56.824820 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:56.824908 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope
E0918 19:38:56.824934 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0918 19:38:56.824916 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0918 19:38:56.824974 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
W0918 19:38:56.825033 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0918 19:38:56.825057 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:56.825111 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0918 19:38:56.825131 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:57.686731 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope
E0918 19:38:57.686774 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:57.741328 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope
E0918 19:38:57.741373 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:57.766648 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope
E0918 19:38:57.766682 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:57.810182 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope
E0918 19:38:57.810217 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError"
W0918 19:38:57.941184 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope
E0918 19:38:57.941231 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError"
W0918 19:38:57.974520 1 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope
E0918 19:38:57.974556 1 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError"
I0918 19:38:58.322705 1 shared_informer.go:320] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
==> kubelet <==
Sep 18 19:51:41 addons-457129 kubelet[2447]: I0918 19:51:41.928453 2447 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63c9d7fe-35bd-4195-aaf1-14b4cd27848f-kube-api-access-72rls" (OuterVolumeSpecName: "kube-api-access-72rls") pod "63c9d7fe-35bd-4195-aaf1-14b4cd27848f" (UID: "63c9d7fe-35bd-4195-aaf1-14b4cd27848f"). InnerVolumeSpecName "kube-api-access-72rls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 18 19:51:41 addons-457129 kubelet[2447]: I0918 19:51:41.928446 2447 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63c9d7fe-35bd-4195-aaf1-14b4cd27848f-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "63c9d7fe-35bd-4195-aaf1-14b4cd27848f" (UID: "63c9d7fe-35bd-4195-aaf1-14b4cd27848f"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Sep 18 19:51:42 addons-457129 kubelet[2447]: I0918 19:51:42.005089 2447 scope.go:117] "RemoveContainer" containerID="df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960"
Sep 18 19:51:42 addons-457129 kubelet[2447]: I0918 19:51:42.019370 2447 scope.go:117] "RemoveContainer" containerID="df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960"
Sep 18 19:51:42 addons-457129 kubelet[2447]: E0918 19:51:42.020153 2447 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960" containerID="df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960"
Sep 18 19:51:42 addons-457129 kubelet[2447]: I0918 19:51:42.020202 2447 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960"} err="failed to get container status \"df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960\": rpc error: code = Unknown desc = Error response from daemon: No such container: df1d7bfc470b870526be4429c625921ed400074cea89715731ea7d841e39c960"
Sep 18 19:51:42 addons-457129 kubelet[2447]: I0918 19:51:42.027461 2447 reconciler_common.go:288] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63c9d7fe-35bd-4195-aaf1-14b4cd27848f-webhook-cert\") on node \"addons-457129\" DevicePath \"\""
Sep 18 19:51:42 addons-457129 kubelet[2447]: I0918 19:51:42.027492 2447 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-72rls\" (UniqueName: \"kubernetes.io/projected/63c9d7fe-35bd-4195-aaf1-14b4cd27848f-kube-api-access-72rls\") on node \"addons-457129\" DevicePath \"\""
Sep 18 19:51:43 addons-457129 kubelet[2447]: I0918 19:51:43.317249 2447 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="kube-system/registry-proxy-cnkhj" secret="" err="secret \"gcp-auth\" not found"
Sep 18 19:51:43 addons-457129 kubelet[2447]: E0918 19:51:43.319077 2447 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-test\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox\\\"\"" pod="default/registry-test" podUID="1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe"
Sep 18 19:51:43 addons-457129 kubelet[2447]: I0918 19:51:43.330644 2447 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63c9d7fe-35bd-4195-aaf1-14b4cd27848f" path="/var/lib/kubelet/pods/63c9d7fe-35bd-4195-aaf1-14b4cd27848f/volumes"
Sep 18 19:51:46 addons-457129 kubelet[2447]: E0918 19:51:46.318550 2447 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"busybox\" with ImagePullBackOff: \"Back-off pulling image \\\"gcr.io/k8s-minikube/busybox:1.28.4-glibc\\\"\"" pod="default/busybox" podUID="bea8b2a1-c35f-4acc-9aea-62afe7b40099"
Sep 18 19:51:46 addons-457129 kubelet[2447]: I0918 19:51:46.654133 2447 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe-gcp-creds" (OuterVolumeSpecName: "gcp-creds") pod "1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe" (UID: "1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe"). InnerVolumeSpecName "gcp-creds". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Sep 18 19:51:46 addons-457129 kubelet[2447]: I0918 19:51:46.654135 2447 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe-gcp-creds\") pod \"1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe\" (UID: \"1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe\") "
Sep 18 19:51:46 addons-457129 kubelet[2447]: I0918 19:51:46.654211 2447 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2dfm\" (UniqueName: \"kubernetes.io/projected/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe-kube-api-access-v2dfm\") pod \"1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe\" (UID: \"1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe\") "
Sep 18 19:51:46 addons-457129 kubelet[2447]: I0918 19:51:46.654315 2447 reconciler_common.go:288] "Volume detached for volume \"gcp-creds\" (UniqueName: \"kubernetes.io/host-path/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe-gcp-creds\") on node \"addons-457129\" DevicePath \"\""
Sep 18 19:51:46 addons-457129 kubelet[2447]: I0918 19:51:46.656108 2447 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe-kube-api-access-v2dfm" (OuterVolumeSpecName: "kube-api-access-v2dfm") pod "1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe" (UID: "1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe"). InnerVolumeSpecName "kube-api-access-v2dfm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 18 19:51:46 addons-457129 kubelet[2447]: I0918 19:51:46.755409 2447 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-v2dfm\" (UniqueName: \"kubernetes.io/projected/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe-kube-api-access-v2dfm\") on node \"addons-457129\" DevicePath \"\""
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.323700 2447 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe" path="/var/lib/kubelet/pods/1dfdec76-47fb-4fa9-8ca4-15f9c0ffbbbe/volumes"
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.358689 2447 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qc55\" (UniqueName: \"kubernetes.io/projected/e15cccd8-7fcb-48c9-9dc2-e79744e87759-kube-api-access-8qc55\") pod \"e15cccd8-7fcb-48c9-9dc2-e79744e87759\" (UID: \"e15cccd8-7fcb-48c9-9dc2-e79744e87759\") "
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.358741 2447 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5w4bf\" (UniqueName: \"kubernetes.io/projected/b2944da3-d9b7-4de7-8a57-f934ec8b2970-kube-api-access-5w4bf\") pod \"b2944da3-d9b7-4de7-8a57-f934ec8b2970\" (UID: \"b2944da3-d9b7-4de7-8a57-f934ec8b2970\") "
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.360529 2447 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e15cccd8-7fcb-48c9-9dc2-e79744e87759-kube-api-access-8qc55" (OuterVolumeSpecName: "kube-api-access-8qc55") pod "e15cccd8-7fcb-48c9-9dc2-e79744e87759" (UID: "e15cccd8-7fcb-48c9-9dc2-e79744e87759"). InnerVolumeSpecName "kube-api-access-8qc55". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.360596 2447 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2944da3-d9b7-4de7-8a57-f934ec8b2970-kube-api-access-5w4bf" (OuterVolumeSpecName: "kube-api-access-5w4bf") pod "b2944da3-d9b7-4de7-8a57-f934ec8b2970" (UID: "b2944da3-d9b7-4de7-8a57-f934ec8b2970"). InnerVolumeSpecName "kube-api-access-5w4bf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.459877 2447 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-8qc55\" (UniqueName: \"kubernetes.io/projected/e15cccd8-7fcb-48c9-9dc2-e79744e87759-kube-api-access-8qc55\") on node \"addons-457129\" DevicePath \"\""
Sep 18 19:51:47 addons-457129 kubelet[2447]: I0918 19:51:47.459947 2447 reconciler_common.go:288] "Volume detached for volume \"kube-api-access-5w4bf\" (UniqueName: \"kubernetes.io/projected/b2944da3-d9b7-4de7-8a57-f934ec8b2970-kube-api-access-5w4bf\") on node \"addons-457129\" DevicePath \"\""
==> storage-provisioner [2158d332c278] <==
I0918 19:39:13.009574 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
I0918 19:39:13.028274 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service!
I0918 19:39:13.028324 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath...
I0918 19:39:13.118382 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0918 19:39:13.118576 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_addons-457129_177a6186-8cf5-479c-aa3e-e3c420f4cd6b!
I0918 19:39:13.120070 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"923447e7-add4-4524-920c-813a29a5a966", APIVersion:"v1", ResourceVersion:"663", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' addons-457129_177a6186-8cf5-479c-aa3e-e3c420f4cd6b became leader
I0918 19:39:13.219592 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_addons-457129_177a6186-8cf5-479c-aa3e-e3c420f4cd6b!
-- /stdout --
helpers_test.go:254: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p addons-457129 -n addons-457129
helpers_test.go:261: (dbg) Run: kubectl --context addons-457129 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running
helpers_test.go:272: non-running pods: busybox
helpers_test.go:274: ======> post-mortem[TestAddons/parallel/Registry]: describe non-running pods <======
helpers_test.go:277: (dbg) Run: kubectl --context addons-457129 describe pod busybox
helpers_test.go:282: (dbg) kubectl --context addons-457129 describe pod busybox:
-- stdout --
Name:             busybox
Namespace:        default
Priority:         0
Service Account:  default
Node:             addons-457129/192.168.49.2
Start Time:       Wed, 18 Sep 2024 19:42:32 +0000
Labels:           integration-test=busybox
Annotations:      <none>
Status:           Pending
IP:               10.244.0.28
IPs:
  IP:  10.244.0.28
Containers:
  busybox:
    Container ID:
    Image:         gcr.io/k8s-minikube/busybox:1.28.4-glibc
    Image ID:
    Port:          <none>
    Host Port:     <none>
    Command:
      sleep
      3600
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:
      GOOGLE_APPLICATION_CREDENTIALS:  /google-app-creds.json
      PROJECT_ID:                      this_is_fake
      GCP_PROJECT:                     this_is_fake
      GCLOUD_PROJECT:                  this_is_fake
      GOOGLE_CLOUD_PROJECT:            this_is_fake
      CLOUDSDK_CORE_PROJECT:           this_is_fake
    Mounts:
      /google-app-creds.json from gcp-creds (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-p8tk4 (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       False
  ContainersReady             False
  PodScheduled                True
Volumes:
  kube-api-access-p8tk4:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
  gcp-creds:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/minikube/google_application_credentials.json
    HostPathType:  File
QoS Class:         BestEffort
Node-Selectors:    <none>
Tolerations:       node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                   node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  9m16s                  default-scheduler  Successfully assigned default/busybox to addons-457129
  Normal   Pulling    7m43s (x4 over 9m15s)  kubelet            Pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
  Warning  Failed     7m43s (x4 over 9m15s)  kubelet            Failed to pull image "gcr.io/k8s-minikube/busybox:1.28.4-glibc": Error response from daemon: Head "https://gcr.io/v2/k8s-minikube/busybox/manifests/1.28.4-glibc": unauthorized: authentication failed
  Warning  Failed     7m43s (x4 over 9m15s)  kubelet            Error: ErrImagePull
  Warning  Failed     7m28s (x6 over 9m15s)  kubelet            Error: ImagePullBackOff
  Normal   BackOff    4m4s (x21 over 9m15s)  kubelet            Back-off pulling image "gcr.io/k8s-minikube/busybox:1.28.4-glibc"
-- /stdout --
helpers_test.go:285: <<< TestAddons/parallel/Registry FAILED: end of post-mortem logs <<<
helpers_test.go:286: ---------------------/post-mortem---------------------------------
--- FAIL: TestAddons/parallel/Registry (73.51s)